diff --git "a/2864.jsonl" "b/2864.jsonl" new file mode 100644--- /dev/null +++ "b/2864.jsonl" @@ -0,0 +1,640 @@ +{"seq_id":"342317803","text":"import datetime as dt\nfrom project import db\nfrom project.models.appointment import Appointment, add_appointment\nfrom project.models.participant import Participant, create_participant\nfrom project.models.event import Event, add_event\nfrom project.models.availability import Availability, create_availability\nfrom project.models.user import User, add_user\nfrom project.test.test_base import TestBase\n\n\nclass ParticipantModelTest(TestBase):\n def test_participant_model(self):\n \"\"\"Tests whether the participant model is working correctly.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n availability = create_availability()\n add_event(user_id=user.id, availability=availability)\n db.session.commit()\n event = Event.query.first()\n name = 'User Jeff'\n email = 'jeff@email.com'\n add_appointment(event_id=event.id,\n participants=[create_participant(name=name,\n email=email)])\n db.session.commit()\n participant = Participant.query.filter_by(name=name).first()\n\n self.assertEqual(participant.email, email)\n\n def test_many_participants(self):\n \"\"\"Tests whether an appointment can have many participants.\"\"\"\n add_user()\n db.session.commit()\n user = User.query.first()\n availability = create_availability()\n add_event(user_id=user.id, availability=availability)\n db.session.commit()\n event = Event.query.first()\n name = ['User Jeff', 'Jeff User', 'Reff Usej']\n email = ['jeff@email.com', 'user@email.com', 'jeff@user.com']\n comments = 'I think this whole Jeff and User thing is getting ' \\\n 'confusing.'\n add_appointment(event_id=event.id,\n participants=[create_participant(name[i], email[i]) for\n i in range(2)],\n comments=comments)\n db.session.commit()\n participants = db.session.query(Participant).\\\n filter(Appointment.comments == comments)\n\n for j in range(2):\n self.assertEqual(participants[j].name, name[j])\n self.assertEqual(participants[j].email, email[j])\n","sub_path":"server/project/test/participant_test.py","file_name":"participant_test.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"118916647","text":"# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: MIT-0\n\n# PyPI dependencies and pip command to install them:\n# pip3 install boto3 requests requests-auth-aws-sigv4\n\n# This python script to PUT/Update a FHIR resource requires the following mandatory CLI arguments. \n# Run \"hl_boto_put.py --help\" for more details\n # HealthLake data store endpoint\n # Resource Path\n # File containing the Request body\n # AWS region\n\nimport boto3\nimport requests\nimport argparse\nimport json\nfrom requests_auth_aws_sigv4 import AWSSigV4\n\n# Define the required input arguments\nargs_parser = argparse.ArgumentParser()\nargs_parser.add_argument(\n \"data_store_endpoint\", \n help=\"The HealthLake Data Store Endpoint including the resource path. \\\n Example: https://healthlake..amazonaws.com/datastore/\")\nargs_parser.add_argument(\n \"request_body_file\", \n help=\"The full file path where the PUT request body contents are available. \\\n Refer to https://docs.aws.amazon.com/healthlake/latest/devguide/update-example.html for sample \\\n PUT request body\")\nargs_parser.add_argument(\n \"region\", \n help=\"The AWS region in which this HealthLake Data Store Exists. 
Example: us-east-1\")\n\n# Parse the input arguments\ninput_args = args_parser.parse_args()\n\ndata_store_endpoint = input_args.data_store_endpoint\nresource_path = input_args.resource_path\nrequest_body_file = input_args.request_body_file\nregion = input_args.region\n\n# Frame the resource endpoint\nresource_endpoint = data_store_endpoint+resource_path\n\nsession = boto3.session.Session(region_name=region)\nclient = session.client(\"healthlake\")\n\n# Frame authorization\nauth = AWSSigV4(\"healthlake\", session=session)\n\n# Read the request body from input file\nwith open(request_body_file) as json_body:\n json_data = json.load(json_body)\n\n# Calling data store FHIR endpoint using SigV4 auth\nr = requests.put(resource_endpoint, json=json_data, auth=auth)\nprint(r.json())","sub_path":"hl-CRUD-operations/python/hl_boto_put.py","file_name":"hl_boto_put.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"215364410","text":"#!/usr/bin/python\nfrom apiclient.discovery import build\nfrom apiclient.errors import HttpError\nfrom oauth2client.tools import argparser\nimport datetime\nimport json\nimport urllib.request\nimport os, django\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"GiveMeCon.settings\")\ndjango.setup()\n\nfrom video.models import Video\n\n# Set DEVELOPER_KEY to the API key value from the APIs & auth > Registered apps\n# tab of\n# https://cloud.google.com/console\n# Please ensure that you have enabled the YouTube Data API for your project.\nDEVELOPER_KEY = \"AIzaSyBAEqG4C5JcGU8LW3WiQW19QbKWwHbpLOE\"\nYOUTUBE_API_SERVICE_NAME = \"youtube\"\nYOUTUBE_API_VERSION = \"v3\"\nkeyword=\"먹방\"\n\ncurrent=datetime.date.today()\nget_time= current-datetime.timedelta(days=7)\nbefore_day=get_time.strftime(\"%Y-%m-%d\")\n\nremainder_time=\"T00:00:00Z\"\ninsert_day=before_day+remainder_time\nprint(insert_day)\n\ndef Video_info(video_id):\n url= f\"https://www.googleapis.com/youtube/v3/videos?part=snippet,statistics&id={video_id}&key={DEVELOPER_KEY}\"\n\n json_url =urllib.request.urlopen(url)\n data=json.loads(json_url.read())\n #data=json.dumps(read_data, indent=4 ,sort_keys=True)\n\n Video.objects.create(title=data[\"items\"][0][\"snippet\"][\"title\"], \\\n view=data[\"items\"][0][\"statistics\"][\"viewCount\"], \\\n thumbnail=data[\"items\"][0][\"snippet\"][\"thumbnails\"][\"default\"][\"url\"])\n\n print(\"제목:\"+data[\"items\"][0][\"snippet\"][\"title\"])\n print(\"썸네일: \"+data[\"items\"][0][\"snippet\"][\"thumbnails\"][\"default\"][\"url\"])\n print(\"조회수:\"+data[\"items\"][0][\"statistics\"][\"viewCount\"]+\"\\n\\n\")\n #print(\"\\n상세내용:\\n\"+data[\"items\"][0][\"snippet\"][\"description\"])\n\n\ndef youtube_search(options):\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\n developerKey=DEVELOPER_KEY)\n\n # Call the search.list method to retrieve results matching the specified\n # query term.\n search_response = youtube.search().list(\n q=options.q,\n part=\"id,snippet\",\n maxResults=options.max_results,\n publishedAfter=insert_day,\n order=\"viewCount\"\n ).execute()\n\n videos = []\n\n # Add each result to the appropriate list, and then display the lists of\n # matching videos, channels, and playlists.\n for search_result in search_response.get(\"items\", []):\n if search_result[\"id\"][\"kind\"] == \"youtube#video\":\n videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"]))\n\n for value in range(len(videos)):\n Video_info(videos[value])\n\nif __name__ == 
\"__main__\":\n argparser.add_argument(\"--q\", help=\"Search term\", default=keyword)\n argparser.add_argument(\"--max-results\", help=\"Max results\", default=50)\n argparser.add_argument(\"--order\", help=\"order\", default=\"viewCount\")\n argparser.add_argument(\"--publishedAfter\", help=\"publishedAfter\", default=insert_day)\n args = argparser.parse_args()\n\n try:\n youtube_search(args)\n except HttpError as e:\n print (\"An HTTP error %d occurred:\\n%s\") % (e.resp.status, e.content)","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"464472657","text":"#\n# @lc app=leetcode id=189 lang=python3\n#\n# [189] Rotate Array\n#\n# https://leetcode.com/problems/rotate-array/description/\n#\n# algorithms\n# Easy (30.05%)\n# Total Accepted: 297.4K\n# Total Submissions: 989.2K\n# Testcase Example: '[1,2,3,4,5,6,7]\\n3'\n#\n# Given an array, rotate the array to the right by k steps, where k is\n# non-negative.\n# \n# Example 1:\n# \n# \n# Input: [1,2,3,4,5,6,7] and k = 3\n# Output: [5,6,7,1,2,3,4]\n# Explanation:\n# rotate 1 steps to the right: [7,1,2,3,4,5,6]\n# rotate 2 steps to the right: [6,7,1,2,3,4,5]\n# rotate 3 steps to the right: [5,6,7,1,2,3,4]\n# \n# \n# Example 2:\n# \n# \n# Input: [-1,-100,3,99] and k = 2\n# Output: [3,99,-1,-100]\n# Explanation: \n# rotate 1 steps to the right: [99,-1,-100,3]\n# rotate 2 steps to the right: [3,99,-1,-100]\n# \n# \n# Note:\n# \n# \n# Try to come up as many solutions as you can, there are at least 3 different\n# ways to solve this problem.\n# Could you do it in-place with O(1) extra space?\n# \n#\nclass Solution:\n\n def rotate(self, nums, k: int) -> None:\n \"\"\"\n 反转三次:前半段、后半段、整体\n 空间复杂度:O(1)\n \"\"\"\n length = len(nums)\n K = k % length\n\n def rotate_inplace(nums, start, end):\n while start < end:\n nums[start], nums[end] = nums[end], nums[start]\n start += 1\n end -= 1\n\n if K > 0:\n rotate_inplace(nums, 0, length - K - 1) # reverse 前半段\n rotate_inplace(nums, length - K, length - 1) # reverse 后半段\n rotate_inplace(nums, 0, length - 1) # reverse 整体\n\n # def rotate(self, nums: List[int], k: int) -> None:\n # \"\"\"\n # Do not return anything, modify nums in-place instead.\n # 时间复杂度:O(1)\n # 空间复杂度:O(n)\n # \"\"\"\n # length = len(nums)\n # K = k % length\n # nums[:K], nums[K:] = nums[length - K:], nums[:length - K] # 整体交换\n\n # def rotate(self, nums: List[int], k: int) -> None:\n # length = len(nums)\n # K = k%length\n # origin_nums = nums[:] # 原数组的拷贝\n # for i in range(length):\n # nums[(i+k)%length] = origin_nums[i]\n","sub_path":"array/189.rotate-array.py","file_name":"189.rotate-array.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"520064937","text":"class Solution:\n def totalNQueens(self, n: int) -> int:\n\n def _draw_board(cols):\n # Draow the board based on col number of each row\n return [\"\".join(['Q' if i == col else '.' 
for i in range(len(cols))]) for col in cols]\n\n def _is_valid(next_col, cols):\n x = next_col\n y = len(cols)\n for yp, xp in enumerate(cols):\n if x == xp:\n return False\n if yp - y == x - xp:\n return False\n if yp - y == xp - x:\n return False\n return True\n\n def _dfs(n, subset, output):\n if len(subset) == n:\n output.append(_draw_board(subset))\n return\n\n for i in range(n):\n if not _is_valid(i, subset):\n continue\n\n _dfs(n, subset + [i], output)\n\n output = []\n _dfs(n, [], output)\n return len(output)\n","sub_path":"leetcode/lc52_N-Queens II.py","file_name":"lc52_N-Queens II.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"443858396","text":"\n\nfrom xai.brain.wordbase.nouns._defoliant import _DEFOLIANT\n\n#calss header\nclass _DEFOLIANTS(_DEFOLIANT, ):\n\tdef __init__(self,): \n\t\t_DEFOLIANT.__init__(self)\n\t\tself.name = \"DEFOLIANTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"defoliant\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_defoliants.py","file_name":"_defoliants.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"278094019","text":"# -*- coding: utf-8 -*-\n# Copyright 2014 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom twisted.internet import defer\n\nfrom synapse.api.events.room import (\n RoomMemberEvent, RoomTopicEvent, FeedbackEvent, RoomNameEvent,\n RoomRedactionEvent,\n)\n\nfrom synapse.util.logutils import log_function\n\nfrom .directory import DirectoryStore\nfrom .feedback import FeedbackStore\nfrom .presence import PresenceStore\nfrom .profile import ProfileStore\nfrom .registration import RegistrationStore\nfrom .room import RoomStore\nfrom .roommember import RoomMemberStore\nfrom .stream import StreamStore\nfrom .transactions import TransactionStore\nfrom .keys import KeyStore\nfrom .event_federation import EventFederationStore\n\nfrom .state import StateStore\nfrom .signatures import SignatureStore\n\nfrom syutil.base64util import decode_base64\n\nfrom synapse.crypto.event_signing import compute_event_reference_hash\n\n\nimport json\nimport logging\nimport os\n\n\nlogger = logging.getLogger(__name__)\n\n\nSCHEMAS = [\n \"transactions\",\n \"users\",\n \"profiles\",\n \"presence\",\n \"im\",\n \"room_aliases\",\n \"keys\",\n \"redactions\",\n \"state\",\n \"event_edges\",\n \"event_signatures\",\n]\n\n\n# Remember to update this number every time an incompatible change is made to\n# database schema files, so the users will be informed on server restarts.\nSCHEMA_VERSION = 8\n\n\nclass _RollbackButIsFineException(Exception):\n \"\"\" This exception is used to rollback a transaction without implying\n something went wrong.\n \"\"\"\n pass\n\n\nclass DataStore(RoomMemberStore, RoomStore,\n RegistrationStore, StreamStore, ProfileStore, FeedbackStore,\n PresenceStore, TransactionStore,\n DirectoryStore, 
KeyStore, StateStore, SignatureStore,\n EventFederationStore, ):\n\n def __init__(self, hs):\n super(DataStore, self).__init__(hs)\n self.event_factory = hs.get_event_factory()\n self.hs = hs\n\n self.min_token_deferred = self._get_min_token()\n self.min_token = None\n\n @defer.inlineCallbacks\n @log_function\n def persist_event(self, event, backfilled=False, is_new_state=True,\n current_state=None):\n stream_ordering = None\n if backfilled:\n if not self.min_token_deferred.called:\n yield self.min_token_deferred\n self.min_token -= 1\n stream_ordering = self.min_token\n\n try:\n yield self.runInteraction(\n \"persist_event\",\n self._persist_event_txn,\n event=event,\n backfilled=backfilled,\n stream_ordering=stream_ordering,\n is_new_state=is_new_state,\n current_state=current_state,\n )\n except _RollbackButIsFineException:\n pass\n\n @defer.inlineCallbacks\n def get_event(self, event_id, allow_none=False):\n events_dict = yield self._simple_select_one(\n \"events\",\n {\"event_id\": event_id},\n [\n \"event_id\",\n \"type\",\n \"room_id\",\n \"content\",\n \"unrecognized_keys\",\n \"depth\",\n ],\n allow_none=allow_none,\n )\n\n if not events_dict:\n defer.returnValue(None)\n\n event = yield self._parse_events([events_dict])\n defer.returnValue(event[0])\n\n @log_function\n def _persist_event_txn(self, txn, event, backfilled, stream_ordering=None,\n is_new_state=True, current_state=None):\n if event.type == RoomMemberEvent.TYPE:\n self._store_room_member_txn(txn, event)\n elif event.type == FeedbackEvent.TYPE:\n self._store_feedback_txn(txn, event)\n elif event.type == RoomNameEvent.TYPE:\n self._store_room_name_txn(txn, event)\n elif event.type == RoomTopicEvent.TYPE:\n self._store_room_topic_txn(txn, event)\n elif event.type == RoomRedactionEvent.TYPE:\n self._store_redaction(txn, event)\n\n outlier = False\n if hasattr(event, \"outlier\"):\n outlier = event.outlier\n\n vals = {\n \"topological_ordering\": event.depth,\n \"event_id\": event.event_id,\n \"type\": event.type,\n \"room_id\": event.room_id,\n \"content\": json.dumps(event.content),\n \"processed\": True,\n \"outlier\": outlier,\n \"depth\": event.depth,\n }\n\n if stream_ordering is not None:\n vals[\"stream_ordering\"] = stream_ordering\n\n unrec = {\n k: v\n for k, v in event.get_full_dict().items()\n if k not in vals.keys() and k not in [\n \"redacted\",\n \"redacted_because\",\n \"signatures\",\n \"hashes\",\n \"prev_events\",\n ]\n }\n vals[\"unrecognized_keys\"] = json.dumps(unrec)\n\n try:\n self._simple_insert_txn(\n txn,\n \"events\",\n vals,\n or_replace=(not outlier),\n or_ignore=bool(outlier),\n )\n except:\n logger.warn(\n \"Failed to persist, probably duplicate: %s\",\n event.event_id,\n exc_info=True,\n )\n raise _RollbackButIsFineException(\"_persist_event\")\n\n self._handle_prev_events(\n txn,\n outlier=outlier,\n event_id=event.event_id,\n prev_events=event.prev_events,\n room_id=event.room_id,\n )\n\n self._store_state_groups_txn(txn, event)\n\n if current_state:\n txn.execute(\n \"DELETE FROM current_state_events WHERE room_id = ?\",\n (event.room_id,)\n )\n\n for s in current_state:\n self._simple_insert_txn(\n txn,\n \"current_state_events\",\n {\n \"event_id\": s.event_id,\n \"room_id\": s.room_id,\n \"type\": s.type,\n \"state_key\": s.state_key,\n },\n or_replace=True,\n )\n\n is_state = hasattr(event, \"state_key\") and event.state_key is not None\n if is_state:\n vals = {\n \"event_id\": event.event_id,\n \"room_id\": event.room_id,\n \"type\": event.type,\n \"state_key\": event.state_key,\n 
}\n\n if hasattr(event, \"replaces_state\"):\n vals[\"prev_state\"] = event.replaces_state\n\n self._simple_insert_txn(\n txn,\n \"state_events\",\n vals,\n or_replace=True,\n )\n\n if is_new_state:\n self._simple_insert_txn(\n txn,\n \"current_state_events\",\n {\n \"event_id\": event.event_id,\n \"room_id\": event.room_id,\n \"type\": event.type,\n \"state_key\": event.state_key,\n },\n or_replace=True,\n )\n\n for e_id, h in event.prev_state:\n self._simple_insert_txn(\n txn,\n table=\"event_edges\",\n values={\n \"event_id\": event.event_id,\n \"prev_event_id\": e_id,\n \"room_id\": event.room_id,\n \"is_state\": 1,\n },\n or_ignore=True,\n )\n\n if not backfilled:\n self._simple_insert_txn(\n txn,\n table=\"state_forward_extremities\",\n values={\n \"event_id\": event.event_id,\n \"room_id\": event.room_id,\n \"type\": event.type,\n \"state_key\": event.state_key,\n },\n or_replace=True,\n )\n\n for prev_state_id, _ in event.prev_state:\n self._simple_delete_txn(\n txn,\n table=\"state_forward_extremities\",\n keyvalues={\n \"event_id\": prev_state_id,\n }\n )\n\n for hash_alg, hash_base64 in event.hashes.items():\n hash_bytes = decode_base64(hash_base64)\n self._store_event_content_hash_txn(\n txn, event.event_id, hash_alg, hash_bytes,\n )\n\n if hasattr(event, \"signatures\"):\n logger.debug(\"sigs: %s\", event.signatures)\n for name, sigs in event.signatures.items():\n for key_id, signature_base64 in sigs.items():\n signature_bytes = decode_base64(signature_base64)\n self._store_event_signature_txn(\n txn, event.event_id, name, key_id,\n signature_bytes,\n )\n\n for prev_event_id, prev_hashes in event.prev_events:\n for alg, hash_base64 in prev_hashes.items():\n hash_bytes = decode_base64(hash_base64)\n self._store_prev_event_hash_txn(\n txn, event.event_id, prev_event_id, alg, hash_bytes\n )\n\n for auth_id, _ in event.auth_events:\n self._simple_insert_txn(\n txn,\n table=\"event_auth\",\n values={\n \"event_id\": event.event_id,\n \"room_id\": event.room_id,\n \"auth_id\": auth_id,\n },\n or_ignore=True,\n )\n\n (ref_alg, ref_hash_bytes) = compute_event_reference_hash(event)\n self._store_event_reference_hash_txn(\n txn, event.event_id, ref_alg, ref_hash_bytes\n )\n\n if not outlier:\n self._update_min_depth_for_room_txn(\n txn,\n event.room_id,\n event.depth\n )\n\n def _store_redaction(self, txn, event):\n txn.execute(\n \"INSERT OR IGNORE INTO redactions \"\n \"(event_id, redacts) VALUES (?,?)\",\n (event.event_id, event.redacts)\n )\n\n @defer.inlineCallbacks\n def get_current_state(self, room_id, event_type=None, state_key=\"\"):\n del_sql = (\n \"SELECT event_id FROM redactions WHERE redacts = e.event_id \"\n \"LIMIT 1\"\n )\n\n sql = (\n \"SELECT e.*, (%(redacted)s) AS redacted FROM events as e \"\n \"INNER JOIN current_state_events as c ON e.event_id = c.event_id \"\n \"INNER JOIN state_events as s ON e.event_id = s.event_id \"\n \"WHERE c.room_id = ? \"\n ) % {\n \"redacted\": del_sql,\n }\n\n if event_type:\n sql += \" AND s.type = ? AND s.state_key = ? 
\"\n args = (room_id, event_type, state_key)\n else:\n args = (room_id, )\n\n results = yield self._execute_and_decode(sql, *args)\n\n events = yield self._parse_events(results)\n defer.returnValue(events)\n\n @defer.inlineCallbacks\n def _get_min_token(self):\n row = yield self._execute(\n None,\n \"SELECT MIN(stream_ordering) FROM events\"\n )\n\n self.min_token = row[0][0] if row and row[0] and row[0][0] else -1\n self.min_token = min(self.min_token, -1)\n\n logger.debug(\"min_token is: %s\", self.min_token)\n\n defer.returnValue(self.min_token)\n\n def insert_client_ip(self, user, access_token, device_id, ip, user_agent):\n return self._simple_insert(\n \"user_ips\",\n {\n \"user\": user.to_string(),\n \"access_token\": access_token,\n \"device_id\": device_id,\n \"ip\": ip,\n \"user_agent\": user_agent,\n \"last_seen\": int(self._clock.time_msec()),\n }\n )\n\n def get_user_ip_and_agents(self, user):\n return self._simple_select_list(\n table=\"user_ips\",\n keyvalues={\"user\": user.to_string()},\n retcols=[\n \"device_id\", \"access_token\", \"ip\", \"user_agent\", \"last_seen\"\n ],\n )\n\n def snapshot_room(self, event):\n \"\"\"Snapshot the room for an update by a user\n Args:\n room_id (synapse.types.RoomId): The room to snapshot.\n user_id (synapse.types.UserId): The user to snapshot the room for.\n state_type (str): Optional state type to snapshot.\n state_key (str): Optional state key to snapshot.\n Returns:\n synapse.storage.Snapshot: A snapshot of the state of the room.\n \"\"\"\n def _snapshot(txn):\n prev_events = self._get_latest_events_in_room(\n txn,\n event.room_id\n )\n\n prev_state = None\n state_key = None\n if hasattr(event, \"state_key\"):\n state_key = event.state_key\n prev_state = self._get_latest_state_in_room(\n txn,\n event.room_id,\n type=event.type,\n state_key=state_key,\n )\n\n return Snapshot(\n store=self,\n room_id=event.room_id,\n user_id=event.user_id,\n prev_events=prev_events,\n prev_state=prev_state,\n state_type=event.type,\n state_key=state_key,\n )\n\n return self.runInteraction(\"snapshot_room\", _snapshot)\n\n\nclass Snapshot(object):\n \"\"\"Snapshot of the state of a room\n Args:\n store (DataStore): The datastore.\n room_id (RoomId): The room of the snapshot.\n user_id (UserId): The user this snapshot is for.\n prev_events (list): The list of event ids this snapshot is after.\n membership_state (RoomMemberEvent): The current state of the user in\n the room.\n state_type (str, optional): State type captured by the snapshot\n state_key (str, optional): State key captured by the snapshot\n prev_state_pdu (PduEntry, optional): pdu id of\n the previous value of the state type and key in the room.\n \"\"\"\n\n def __init__(self, store, room_id, user_id, prev_events,\n prev_state, state_type=None, state_key=None):\n self.store = store\n self.room_id = room_id\n self.user_id = user_id\n self.prev_events = prev_events\n self.prev_state = prev_state\n self.state_type = state_type\n self.state_key = state_key\n\n def fill_out_prev_events(self, event):\n if not hasattr(event, \"prev_events\"):\n event.prev_events = [\n (event_id, hashes)\n for event_id, hashes, _ in self.prev_events\n ]\n\n if self.prev_events:\n event.depth = max([int(v) for _, _, v in self.prev_events]) + 1\n else:\n event.depth = 0\n\n if not hasattr(event, \"prev_state\") and self.prev_state is not None:\n event.prev_state = self.prev_state\n\n\ndef schema_path(schema):\n \"\"\" Get a filesystem path for the named database schema\n\n Args:\n schema: Name of the database 
schema.\n Returns:\n A filesystem path pointing at a \".sql\" file.\n\n \"\"\"\n dir_path = os.path.dirname(__file__)\n schemaPath = os.path.join(dir_path, \"schema\", schema + \".sql\")\n return schemaPath\n\n\ndef read_schema(schema):\n \"\"\" Read the named database schema.\n\n Args:\n schema: Name of the database schema.\n Returns:\n A string containing the database schema.\n \"\"\"\n with open(schema_path(schema)) as schema_file:\n return schema_file.read()\n\n\ndef prepare_database(db_conn):\n \"\"\" Set up all the dbs. Since all the *.sql files use IF NOT EXISTS, we\n don't have to worry about overwriting existing content.\n \"\"\"\n c = db_conn.cursor()\n c.execute(\"PRAGMA user_version\")\n row = c.fetchone()\n\n if row and row[0]:\n user_version = row[0]\n\n if user_version > SCHEMA_VERSION:\n raise ValueError(\n \"Cannot use this database as it is too \" +\n \"new for the server to understand\"\n )\n elif user_version < SCHEMA_VERSION:\n logger.info(\n \"Upgrading database from version %d\",\n user_version\n )\n\n # Run every version after the current version.\n for v in range(user_version + 1, SCHEMA_VERSION + 1):\n sql_script = read_schema(\"delta/v%d\" % (v))\n c.executescript(sql_script)\n\n db_conn.commit()\n\n else:\n sql_script = \"BEGIN TRANSACTION;\\n\"\n for sql_loc in SCHEMAS:\n sql_script += read_schema(sql_loc)\n sql_script += \"\\n\"\n sql_script += \"COMMIT TRANSACTION;\"\n c.executescript(sql_script)\n db_conn.commit()\n c.execute(\"PRAGMA user_version = %d\" % SCHEMA_VERSION)\n\n c.close()\n","sub_path":"synapse/storage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":17837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"313871222","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 31 02:32:24 2020\r\n\r\n@author: 양현영\r\n\"\"\"\r\nimport requests\r\nimport time\r\nimport json\r\nimport pandas as pd\r\nimport numpy as np\r\nimport logging\r\nfrom datetime import datetime\r\n\r\nlogging.basicConfig(filename=\"./\"+datetime.today().strftime(\"%Y-%m-%d\")+\".log\", level=logging.INFO)\r\n\r\nclass MatchTimelineReceiver:\r\n def __init__(self, api_key, csv_name):\r\n self.API_KEY = api_key\r\n self.csv_name = csv_name\r\n \r\n def set_dbcontroller(self, db_controller):\r\n self.db_controller_ = db_controller\r\n \r\n def update_db(self, matchTimeline_df):\r\n self.db_controller_.update_matchTimeline(matchTimeline_df)\r\n \r\n def make_matchTimeline_df(self, r_get_timeline, gameId):\r\n r_get_timeline_json = r_get_timeline.json()\r\n usersTimeline_df = pd.DataFrame([r_get_timeline_json])\r\n usersTimeline_df[\"gameId\"] = int(gameId)\r\n \r\n return usersTimeline_df\r\n \r\n def request_matchTimeline(self, unique_gameId):\r\n loop = 0\r\n flag = False\r\n total_length = len(unique_gameId)\r\n \r\n while True:\r\n get_matchTimeline = \"https://kr.api.riotgames.com/lol/match/v4/timelines/by-match/\"+ \\\r\n str(unique_gameId[loop]) +\"?api_key=\"+self.API_KEY\r\n r_get_matchTimeline = requests.get(get_matchTimeline)\r\n \r\n if r_get_matchTimeline.status_code == 429:\r\n logging.warning(str(unique_gameId[loop])+\" time exceed\")\r\n time.sleep(30)\r\n continue\r\n elif r_get_matchTimeline.status_code != 200:\r\n logging.error(\"status code = \"+str(r_get_matchTimeline.status_code))\r\n time.sleep(10)\r\n continue\r\n \r\n if not flag:\r\n flag = True\r\n users_matchTimeline_df_after = self.make_matchTimeline_df(r_get_matchTimeline, unique_gameId[loop])\r\n 
else:\r\n users_matchTimeline_df = self.make_matchTimeline_df(r_get_matchTimeline, unique_gameId[loop])\r\n users_matchTimeline_df_after = pd.concat([users_matchTimeline_df_after, users_matchTimeline_df], axis=0)\r\n \r\n # find all matches\r\n # if loop%100==99:\r\n # users_matchTimeline_df_after.to_csv(self.csv_name, mode=\"a\", header=False)\r\n logging.info(str(unique_gameId[loop])+\" to csv\"+str(datetime.today()))\r\n\r\n loop+=1\r\n \r\n if loop%1000==999:\r\n flag=False\r\n self.update_db(users_matchTimeline_df_after)\r\n logging.info(str(unique_gameId[loop])+\" to db \"+str(datetime.today()))\r\n \r\n if loop==total_length:\r\n break\r\n \r\n return users_matchTimeline_df_after\r\n\r\n def run(self, unique_gameId, db_controller):\r\n self.set_dbcontroller(db_controller)\r\n self.request_matchTimeline(unique_gameId)","sub_path":"game_data_download/matchTimelineReceiver.py","file_name":"matchTimelineReceiver.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"235458781","text":"\"\"\"\nMAIN PURPOSE: download all of the subhalos needed.\n\"\"\"\n\nimport h5py\nimport numpy as np\nimport os\nimport shutil\nimport io\n\nimport tqdm\n\nimport illpy as ill\n\nfrom utils.generalfuncs import download_sub\nfrom utils import SubProcess\n\nFLUSH_INT = 10\n\n\ndef write_complete(fio, row, flush=0):\n # number, which, snap, subhalo\n fio.write('%i\\t%i\\t%i\\t%i\\n' % (row[0], row[1], row[2], row[3]))\n if (flush+1) % FLUSH_INT == 0:\n fio.flush()\n\n flush = flush + 1\n return flush\n\n\nclass Download_Needed(SubProcess):\n \"\"\"\n Download_Needed downloads all of the subhalos necessary for host galaxy characterization of the mergers. For host galaxies post-merger, particle information is downloaded for gas, star, bh, and dm particles. For host galaxies pre-merger, particle information is downloaded for star and bh particles.\n\n THIS CODE DOES NOT CHECK IF IT IS NEEDED. THE USER MUST KNOW STATUS USING ``completed_snaps_and_subs.txt`` COMPARED TO ``snaps_and_subs_needed.txt``.\n\n To use this file, you need to have a specific file structure within self.dir_output.\n '%i/%i_sub_cutouts/' % (snap, snap). 
The files are then stored in the struture as '%i/%i_sub_cutouts/cutout_%i_%i.hdf5' % (snap, snap, snap, sub).\n\n attributes:\n :param ill_run - (int) - illustris run to use\n :param dir_output - (str) - dir_output to work out of\n\n \"\"\"\n\n def __init__(self, core, **kwargs):\n super().__init__(core)\n\n\n def download_needed_subhalos(self):\n \"\"\"\n Download all the subhalos files needed.\n \"\"\"\n fname_needed = self.core.fname_snaps_and_subs()\n snap_subs_needed = np.genfromtxt(fname_needed, skip_header=1, names=True, dtype=None)\n\n # need to keep track because this process will time out.\n fname_completed = self.core.fname_snaps_and_subs_completed()\n exists = os.path.exists(fname_completed)\n print(\"Completed snaps and subs: '{}' exists: {}\".format(fname_completed, exists))\n\n if exists:\n f_complete = open(fname_completed, 'r+')\n lines_completed = f_complete.readlines()\n start = len(lines_completed)\n last = None if len(lines_completed) == 0 else lines_completed[-1]\n print(\"Start = '{}', last line: '{}'\".format(start, last))\n else:\n f_complete = open(fname_completed, 'w')\n start = 0\n\n '''\n try:\n f_complete = open(fname_completed, 'r+')\n except FileNotFoundError:\n f_complete = open(fname_completed, 'w')\n\n # figure out where you left off\n try:\n lines_completed = f_complete.readlines()\n start = len(lines_completed)\n last = None if len(lines_completed) == 0 else lines_completed[-1]\n print(\"Start = '{}', last line: '{}'\".format(start, last))\n print(\"snap_subs_needed[{}-1]: '{}'\".format(start, snap_subs_needed[start-1]))\n except io.UnsupportedOperation:\n start = 0\n\n print(start)\n print(len(snap_subs_needed[start:]))\n '''\n\n # raise\n flush = 0\n for i, row in enumerate(tqdm.tqdm(snap_subs_needed[start:], desc='Snap subhalos needed')):\n which = row[1]\n snap = row[2]\n sub = row[3]\n fname_snap_sub_cutout = self.core.fname_snap_sub_cutout(snap, sub)\n '''\n if os.path.exists(fname_snap_sub_cutout):\n print(\"WARNING: i={}, row={}\\n\\tfile exists '{}'\".format(\n i, row, fname_snap_sub_cutout))\n flush = write_complete(f_complete, row, flush=flush)\n continue\n '''\n '''\n else:\n print(i, row)\n print(\"DOES NOT EXIST!\")\n raise\n '''\n\n # Make sure output path exists\n path_snap_sub = os.path.dirname(fname_snap_sub_cutout)\n os.makedirs(path_snap_sub, exist_ok=True)\n\n # if which != 3, we only want stars and bhs\n # check if this file is already there\n if which != 3:\n if os.path.exists(fname_snap_sub_cutout):\n with h5py.File(fname_snap_sub_cutout, 'r') as f:\n if 'PartType4' in f:\n # f_complete.write('%i\\t%i\\t%i\\t%i\\n' % (row[0], row[1], snap, sub))\n flush = write_complete(f_complete, row, flush=flush)\n continue\n\n # if which != 3, we only want stars and bhs\n if which != 3:\n cutout_request = {\n 'bhs': 'all',\n 'stars': 'Coordinates,Masses,Velocities,GFM_StellarFormationTime'\n }\n\n # if which == 3, get all particle data with quant ities of interest\n else:\n cutout_request = {\n 'bhs': 'all',\n 'gas': 'Coordinates,Masses,Velocities,StarFormationRate',\n 'stars': 'Coordinates,Masses,Velocities,GFM_StellarFormationTime',\n 'dm': 'Coordinates,Velocities'\n }\n\n # cutout = download_sub(self.base_url, snap, sub, cutout_request=cutout_request)\n cutout = self.get_cutout(snap, sub, cutout_request=cutout_request)\n\n # move file for good organization\n # os.rename(cutout, fname_snap_sub_cutout)\n shutil.move(cutout, fname_snap_sub_cutout)\n\n # record what has been completed\n # f_complete.write('%i\\t%i\\t%i\\t%i\\n' % (row[0], 
row[1], snap, sub))\n flush = write_complete(f_complete, row, flush=flush)\n\n return\n\n def get_cutout(self, snap, sub, cutout_request=None):\n return download_sub(self.base_url, snap, sub, cutout_request=cutout_request)\n\n\nclass Download_Needed_Odyssey(Download_Needed):\n \"\"\"\n \"\"\"\n\n '''\n def get_cutout(self, snap, sub, cutout_request=None):\n # return download_sub(self.base_url, snap, sub, cutout_request=cutout_request)\n\n import illpy.snapshot\n\n part_nums = [0, 1, 4, 5]\n # fields = None\n\n part_fields = {\n 0: ['Coordinates', 'Masses', 'Velocities', 'StarFormationRate'],\n 1: ['Coordinates', 'Velocities'],\n 4: ['Coordinates', 'Masses', 'Velocities', 'GFM_StellarFormationTime'],\n 5: None,\n }\n\n path = self.core.dir_input\n\n fname_out = \"temp_snap{}_sub{}.hdf5\".format(snap, sub)\n\n with h5py.File(fname_out, 'w') as out:\n for pt in part_nums:\n pt_key = \"PartType{}\".format(pt)\n fields = part_fields[pt]\n group = out.create_group(pt_key)\n subh = ill.snapshot.loadSubhalo(path, snap, sub, pt, fields=fields)\n keys = list(subh.keys())\n # print(\"{} : keys = '{}'\".format(pt, keys))\n for key in keys:\n group[key] = subh[key]\n\n # print(\"File '{}' size: {}\".format(fname_out, os.path.getsize(fname_out)))\n\n # raise\n return fname_out\n '''\n\n def get_cutout(self, snap, sub, cutout_request=None):\n # return download_sub(self.base_url, snap, sub, cutout_request=cutout_request)\n\n import illpy.snapshot\n\n part_nums = [0, 1, 4, 5]\n # fields = None\n\n '''\n part_fields = {\n 0: ['Coordinates', 'Masses', 'Velocities', 'StarFormationRate'],\n 1: ['Coordinates', 'Velocities'],\n 4: ['Coordinates', 'Masses', 'Velocities', 'GFM_StellarFormationTime'],\n 5: None,\n }\n '''\n\n # Determine what parameters/fields we want from the cutout (subhalo)\n if cutout_request is None:\n part_fields = {\n 0: ['Coordinates', 'Masses', 'Velocities', 'StarFormationRate'],\n 1: ['Coordinates', 'Velocities'],\n 4: ['Coordinates', 'Masses', 'Velocities', 'GFM_StellarFormationTime'],\n 5: None,\n }\n else:\n part_fields = {}\n for key, val in cutout_request.items():\n pnum = ill.snapshot.partTypeNum(key)\n fields = [vv.strip() for vv in val.strip().split(',')]\n fields = None if 'all' in fields else fields\n part_fields[pnum] = fields\n\n path = self.core.dir_input\n fname_out = \"temp_snap{}_sub{}.hdf5\".format(snap, sub)\n\n with h5py.File(fname_out, 'w') as out:\n out_keys = list(out.keys())\n for pt, fields in part_fields.items():\n pt_key = \"PartType{}\".format(pt)\n # fields = part_fields[pt]\n group = out.create_group(pt_key)\n subh = ill.snapshot.loadSubhalo(path, snap, sub, pt, fields=fields)\n keys = list(subh.keys())\n # print(\"{} : keys = '{}'\".format(pt, keys))\n for key in keys:\n group[key] = subh[key]\n\n # print(\"File '{}' size: {}\".format(fname_out, os.path.getsize(fname_out)))\n\n # raise\n return fname_out\n","sub_path":"extraction/utils/download_needed.py","file_name":"download_needed.py","file_ext":"py","file_size_in_byte":9195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"218999780","text":"# -*- coding:utf-8 -*-\n\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom suds.client import Client as sudsclient\nimport json\nfrom Util import myUtil\nfrom models import *\n\n__author__ = 'kiven'\n\ndef index(request):\n proInfo = {}\n personInfo = {}\n\n util = myUtil()\n sql = util.getSQLStatement('static/SQL/cpm.sql')\n sql_cpms = 
util.getSQLStatement('static/SQL/cpms.sql')\n sql_nocpm = util.getSQLStatement('static/SQL/nocpm.sql')\n\n\n\n result = util.getQueryResult(sql)\n result_cpms = util.getQueryResult(sql_cpms)\n result_nocpm = util.getQueryResult(sql_nocpm)\n\n\n\n # 三种SQL情况下的所有issue状态为BUG的放入一个列表中\n if result_cpms:\n for i in range(len(result_cpms)):\n result.append(result_cpms[i])\n if result_nocpm:\n for j in range(len(result_nocpm)):\n result.append(result_nocpm[j])\n\n # 获取所有项目名\n pName = util.getProName(result)\n\n # 获取状态以及各状态BUG数量\n allBugNum = 0\n inprogessNum = 0\n openBugNum = 0\n closedBugNum = 0\n resolvedBugNum = 0\n todayOpenBugNum = 0\n todayReopenBugNum =0\n todayCloseBugNum = 0\n\n for i in range(len(pName)):\n status = util.getProStatus(result,pName[i])\n proInfo[pName[i]] = status\n # 所有项目的所有BUG总数\n allBugNum += status['total']\n openBugNum += status['Open']\n inprogessNum += status['InProgress']\n closedBugNum += status['Closed']\n resolvedBugNum += status['Resolved']\n todayOpenBugNum += status['todayOpen']\n todayReopenBugNum += status['todayReopen']\n todayCloseBugNum += status['todayClose']\n # status.append(util.getProStatus(result,pName[i]))\n\n\n\n # 个人BUG各状态数量\n allStatusNum = openStatusNum = closeStatusNum = resolvedStatusNum = inprogessStatusNum = 0\n\n sql_getperson = util.getSQLStatement('static/SQL/getPerson.sql')\n result_getperson = util.getQueryResult(sql_getperson)\n personName = util.getPerson(result_getperson)\n for k in range(len(personName)):\n status = util.getBugStatus(result_getperson,personName[k])\n personInfo[personName[k]] = status\n allStatusNum += status['total']\n openStatusNum += status['open']\n closeStatusNum += status['closed']\n resolvedStatusNum += status['resolved']\n inprogessStatusNum += status['inprogess']\n\n\n\n #\n return render_to_response('index.html',{\n 'proInfo':proInfo,\n 'allBugNum':allBugNum,\n 'openBugNum':openBugNum,\n 'inprogressNum':inprogessNum,\n 'closedBugNum':closedBugNum,\n 'resolvedBugNum':resolvedBugNum,\n 'todayOpenBugNum':todayOpenBugNum,\n 'todayReopenBugNum':todayReopenBugNum,\n 'todayCloseBugNum':todayCloseBugNum,\n\n 'personInfo':personInfo,\n 'allStatusNum':allStatusNum,\n 'openStatusNum':openStatusNum,\n 'closeStatusNum':closeStatusNum,\n 'resolvedStatusNum':resolvedStatusNum,\n 'inprogessStatusNum':inprogessStatusNum,\n })\n\n\ndef send_email(request):\n if request.method==\"POST\" and request.POST['mailContent']:\n sudsclient_email=sudsclient(\"http://192.168.81.123/SendMail/SendMail.asmx?wsdl\")\n try:\n sudsclient_email.service.SendMailWithHtml2(\"shen_jl@ctrip.com\",u\"金融测试组(功能)项目BUG统计\",request.POST['mailContent'],\"shen_jl@ctrip.com\")\n except:\n return HttpResponse(u\"发送失败!\")\n return HttpResponse(u'发送成功!')\n\ndef toMySQL(request):\n ass = AssignModel()\n\ndef mainReport(request):\n util = myUtil()\n sql = \"SELECT cu.lower_display_name FROM cwd_user cu LEFT JOIN cwd_membership cms ON cu.ID = cms.child_id where cms.parent_id = 29988;\"\n results = util.getQueryResult(sql)\n\n return render_to_response('MainReport.html',{'results':results})\n","sub_path":"CP4Reporter/HomePage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"441139265","text":"# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, 
print_function,\n unicode_literals, with_statement)\n\nimport logging\nimport os\n\nfrom twitter.common.collections import OrderedSet\n\nfrom pants.base.build_file import BuildFile\nfrom pants.base.specs import DescendantAddresses, SiblingAddresses\nfrom pants.build_graph.address_lookup_error import AddressLookupError\nfrom pants.build_graph.address_mapper import AddressMapper\nfrom pants.util.dirutil import fast_relpath\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass LegacyAddressMapper(AddressMapper):\n \"\"\"Provides a facade over the engine backed build graph.\n\n This allows tasks to use the context's address_mapper when the v2 engine is enabled.\n \"\"\"\n\n def __init__(self, graph, build_root):\n self._build_root = build_root\n self._graph = graph\n\n @staticmethod\n def is_declaring_file(address, file_path):\n # NB: this will cause any BUILD file, whether it contains the address declaration or not to be\n # considered the one that declared it. That's ok though, because the spec path should be enough\n # information for debugging most of the time.\n #\n # We could call into the engine to ask for the file that declared the address.\n return (os.path.dirname(file_path) == address.spec_path and\n BuildFile._is_buildfile_name(os.path.basename(file_path)))\n\n def addresses_in_spec_path(self, spec_path):\n try:\n return set(self._graph.inject_specs_closure([SiblingAddresses(spec_path)]))\n except AddressLookupError as e:\n raise self.BuildFileScanError(str(e))\n\n def scan_specs(self, specs, fail_fast=True):\n try:\n return OrderedSet(self._graph.inject_specs_closure(specs, fail_fast))\n except AddressLookupError as e:\n raise self.BuildFileScanError(str(e))\n\n def resolve(self, address):\n try:\n target = self._graph.get_target(address)\n if not target:\n try:\n addresses = self.addresses_in_spec_path(address.spec_path)\n except AddressLookupError:\n addresses = set()\n\n raise self._raise_incorrect_address_error(address.spec_path, address.target_name, addresses)\n return address, target\n\n except AddressLookupError as e:\n raise self.BuildFileScanError(str(e))\n\n def scan_addresses(self, root=None):\n if root:\n try:\n base_path = fast_relpath(root, self._build_root)\n except ValueError as e:\n raise self.InvalidRootError(e)\n else:\n base_path = ''\n\n addresses = set()\n for address in self._graph.inject_specs_closure([DescendantAddresses(base_path)]):\n addresses.add(address)\n return addresses\n","sub_path":"src/python/pants/engine/legacy/address_mapper.py","file_name":"address_mapper.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"586654689","text":"n = int(input())\r\nmatrix = []\r\nlist_of_bomb_coordinates = []\r\ndamage_r = [-1, -1, -1, 0, 0, 1, 1, 1]\r\ndamage_c = [-1, 0, 1, -1, 1, -1, 0, 1]\r\nalive_cells_sum = 0\r\nalive_cells_count = 0\r\n\r\nfor _ in range(n):\r\n add_current_row = [int(x) for x in input().split()]\r\n matrix.append(add_current_row)\r\n\r\nbomb_coordinates = input().split()\r\nfor coord_row in bomb_coordinates:\r\n row, col = [int(x) for x in coord_row.split(\",\")]\r\n list_of_bomb_coordinates.append([row, col])\r\n\r\nfor coordinate in list_of_bomb_coordinates:\r\n bomb_r = coordinate[0]\r\n bomb_c = coordinate[1]\r\n bomb_value = matrix[bomb_r][bomb_c]\r\n if bomb_value <= 0:\r\n continue\r\n else:\r\n matrix[bomb_r][bomb_c] = 0\r\n for i in range(len(damage_r)):\r\n row_index_dmg = bomb_r + damage_r[i]\r\n col_index_dmg = bomb_c + 
damage_c[i]\r\n bomb_index_checker = [row_index_dmg, col_index_dmg]\r\n if 0 <= row_index_dmg < n and 0 <= col_index_dmg < n:\r\n # if bomb_index_checker not in list_of_bomb_coordinates:\r\n if matrix[row_index_dmg][col_index_dmg] > 0:\r\n matrix[row_index_dmg][col_index_dmg] -= bomb_value\r\n\r\nfor r in range(n):\r\n for c in range(n):\r\n if matrix[r][c] > 0:\r\n alive_cells_count += 1\r\n alive_cells_sum += matrix[r][c]\r\nprint(f\"Alive cells: {alive_cells_count}\")\r\nprint(f\"Sum: {alive_cells_sum}\")\r\n\r\nfor row_matrix in matrix:\r\n print(*row_matrix)","sub_path":"8_multidimensional_lists_exercise/07_bombs.py","file_name":"07_bombs.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"588819820","text":"from osgeo import ogr\nimport glob\nimport time\nimport csv\n\nHOST = \"\"\nPORT = \"\"\nDATABASE = \"\"\nSCHEMA = \"\"\nUSER = \"\"\nPASSWORD = \"\"\n\nDATA_DIR = \"./lcvs/\"\n\n\ndef updateTaxaCites():\n connString = \"PG: host='\" + HOST + \"' port=\" + PORT + \" dbname=\" + DATABASE + \" active_schema=\" + SCHEMA + \" user=\" + USER + \" password=\" + PASSWORD\n lcvs_dir = DATA_DIR\n\n # Open database \n dsOut = ogr.Open(connString)\n if dsOut is None:\n print(\"Could not open PostGIS Database \" + connString)\n return\n\n csv_list = sorted(glob.glob(lcvs_dir + '*.csv'))\n for csvfile in csv_list:\n print(csvfile)\n\n with open(csvfile, encoding=\"utf8\") as f:\n records = csv.reader(f, delimiter=',')\n \n i = 0\n for record in records:\n print(i)\n \n if i == 0: # header\n print(str(record)) \n scientificName_field = -1\n taxonRank_field = -1\n lcvsStatus_field = -1\n \n field_index = 0\n for field in record:\n if field.lower() == \"scientificname\":\n scientificName_field = field_index\n elif field.lower() == \"taxonrank\":\n taxonRank_field = field_index\n elif field.lower() == \"lcvsstatus\":\n lcvsStatus_field = field_index\n\n field_index += 1\n\n print(\"scientificName\", scientificName_field)\n print(\"taxonRank\", taxonRank_field)\n print(\"lcvsstatus\", lcvsStatus_field)\n\n # break \n else:\n if scientificName_field != -1:\n scientific_name = record[scientificName_field]\n else:\n continue\n if taxonRank_field != -1:\n taxonRank = record[taxonRank_field]\n else:\n continue\n if lcvsStatus_field != -1:\n lcvsStatus = record[lcvsStatus_field]\n else:\n continue\n\n if taxonRank.lower() == \"family\":\n taxonRank_name = \"family_name\"\n elif taxonRank.lower() == \"genus\":\n taxonRank_name = \"genus_name\"\n elif taxonRank.lower() == \"species\":\n taxonRank_name = \"scientific_name\"\n\n print(scientific_name + \" \" + taxonRank_name) \n\n # Assemble SQL statement\n query_occurrences = \"UPDATE taxon_occurrence SET lcvs_status = '{}' WHERE {} = '{}';\".format(lcvsStatus, taxonRank_name, scientific_name)\n print(query_occurrences)\n dsOut.ExecuteSQL(query_occurrences)\n query_distribution = \"UPDATE taxon_distribution SET lcvs_status = '{}' WHERE {} = '{}';\".format(lcvsStatus, taxonRank_name, scientific_name)\n print(query_distribution)\n dsOut.ExecuteSQL(query_distribution)\n\n i = i + 1\n \n dsOut = None\n\nif __name__ == '__main__':\n start = time.time()\n updateTaxaCites()\n end = time.time()\n\n print(\"Tiempo de inicio:\", start)\n print(\"Tiempo de finalización:\", end)\n print(\"Tiempo de ejecución:\", end - start, 
\"segundos\")\n","sub_path":"update-taxa-lcvs.py","file_name":"update-taxa-lcvs.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"34357281","text":"import setuptools\nimport json\n\ndef locked_requirements(section):\n \"\"\"Look through the 'Pipfile.lock' to fetch requirements by section.\n https://stackoverflow.com/a/50957425/14775744\"\"\"\n with open('Pipfile.lock') as pip_file:\n pipfile_json = json.load(pip_file)\n\n if section not in pipfile_json:\n print(\"{0} section missing from Pipfile.lock\".format(section))\n return []\n\n return [package + detail.get('version', \"\")\n for package, detail in pipfile_json[section].items()]\n\nsetuptools.setup(\n name=\"aws_s3_desktop_uploader\",\n version=\"0.0.1\",\n description=\"A simple Python package\",\n url=\"https://github.com/Salk-Harnessing-Plants-Initiative/aws-s3-desktop-uploader\",\n author=\"Salk HPI\",\n license=\"MIT\",\n packages=setuptools.find_packages(),\n python_requires=\">=3\",\n install_requires=locked_requirements('default')\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"290675392","text":"\"\"\"CFNgin hook for cleaning up resources prior to CFN stack deletion.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom typing import TYPE_CHECKING, Any\n\nif TYPE_CHECKING:\n from ...context import CfnginContext\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef delete_param(context: CfnginContext, *, parameter_name: str, **_: Any) -> bool:\n \"\"\"Delete SSM parameter.\"\"\"\n if not parameter_name:\n raise ValueError(\"Must specify `parameter_name` for delete_param hook.\")\n\n session = context.get_session()\n ssm_client = session.client(\"ssm\")\n\n try:\n ssm_client.delete_parameter(Name=parameter_name)\n except ssm_client.exceptions.ParameterNotFound:\n LOGGER.info('parameter \"%s\" does not exist', parameter_name)\n return True\n","sub_path":"runway/cfngin/hooks/cleanup_ssm.py","file_name":"cleanup_ssm.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"514965924","text":"import unittest\nfrom Trees.src.trees.bst_tree import BST\nfrom Trees.src.nodes.bst_node import BSTNode\n\n\nclass TestBST(unittest.TestCase):\n def test_create_empty_tree(self):\n tree = BST()\n self.assertEqual(len(tree), 0)\n self.assertIsNone(tree.root)\n\n def test_create_tree(self):\n tree = BST()\n tree.add_value(100)\n tree.add_value(80)\n tree.add_value(200)\n tree.add_value(90)\n tree.add_value(70)\n\n root = BSTNode(100)\n root.left = BSTNode(80)\n root.right = BSTNode(200)\n root.left.left = BSTNode(70)\n root.left.right = BSTNode(90)\n\n cmp_tree = BST(root)\n self.assertEqual(tree, cmp_tree)\n\n\n def test_tree_not_eq(self):\n tree = BST()\n tree.add_value(100)\n tree.add_value(80)\n tree.add_value(200)\n tree.add_value(90)\n tree.add_value(70)\n\n root = BSTNode(100)\n root.left = BSTNode(80)\n root.right = BSTNode(200)\n root.left.left = BSTNode(70)\n root.left.right = BSTNode(92)\n\n cmp_tree = BST(root)\n cmp_tree._num_nodes = 5\n self.assertNotEqual(tree, cmp_tree)\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"Trees/test/trees/test_bst.py","file_name":"test_bst.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"451900711","text":"from phase1.other_functions import *\nfrom keras.callbacks import TensorBoard\nfrom candidate import Candidate\n\nFILENAME = \"../data3.txt\"\n\n\ndef binary_to_int(value):\n to_send = \"\"\n for i in range(len(value)):\n to_send += str(value[i])\n to_send = int(to_send, 2)\n return to_send\n\n\ndef evaluate(individual, data):\n # individual: layer_size + training_percentage + batch_size + epochs\n # 8-63 0.6-0.9 4-63 1-15\n # 6 bits 4 bits 6 bits 4 bits\n # total of 20 bits\n layer_size = binary_to_int(individual.value[:6])\n if layer_size < 8:\n layer_size = 8\n training_percentage = binary_to_int(individual.value[6:10])\n training_percentage = (training_percentage/15)*0.3+0.6\n batch_size = binary_to_int(individual.value[10:16])\n if batch_size < 4:\n batch_size = 4\n epochs = binary_to_int(individual.value[16:])\n if epochs < 1:\n epochs = 1\n\n model = create_model(input_dim=len(data[0]) - 1, layer_size=layer_size)\n\n # model.summary()\n name = f\"GA-NN-{binary_to_int(individual.value)}\"\n tensorboard = TensorBoard(log_dir=f\"logs/{name}\")\n\n X_train, Y_train = prepare_data(data[:int(training_percentage * len(data))])\n X_test, Y_test = prepare_data(data[int(training_percentage * len(data)):])\n\n model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=0, callbacks=[tensorboard])\n\n score = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=0)\n\n individual.fitness = int(score[1]*1000)\n\n\ndef pop_initialise(population_size, candidate_length):\n population = []\n\n for i in range(population_size):\n value = np.random.randint(2, size=[candidate_length])\n population.append(Candidate(candidate_length, value=value))\n\n return np.array(population)","sub_path":"data3-nn/phase2/new_functions.py","file_name":"new_functions.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"514940245","text":"#!/usr/bin/env python3\n\nimport sys\nimport json\nimport wiki\n\n# Example: https://en.wikipedia.org/w/api.php?action=query&format=json&list=categorymembers&cmtype=subcat&cmtitle=Category:Anatomy\n\ndef main():\n if len(sys.argv) < 2:\n # NOTE: The category can be specified with either spaces or underscores.\n sys.stderr.write(\"{0} CATEGORY\\n\".format(sys.argv[0]))\n sys.exit(1)\n\n category = sys.argv[1]\n\n wiki_api = wiki.API('wikipedia')\n #ganfyd_api = wiki.API('ganfyd')\n\n #obj = wiki_api.query_category_subcat(category)\n obj = wiki_api.get_category_subcategories(category)\n\n json.dump(obj, sys.stdout, indent=2)\n sys.stdout.write('\\n')\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"get-subcat.py","file_name":"get-subcat.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"585104570","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 17 19:36:35 2018\n\n@author: svellanki\n\"\"\"\nimport pandas as pd\n\ndef entity_options(entities):\n dic1= (entities.groupby(['entity']).col_name.nunique())\n dic2=(entities[entities.col_name=='column'].groupby(['entity']).list.nunique())\n \n# dic=dic1.append(dic2)\n \n dic1=dic1[dic1>1]\n \n options=[entities]\n flag=True\n for ent, i in dic1.iteritems():\n 
inter_options=[]\n for opt in options:\n print (ent)\n cols=opt[opt.entity==ent].col_name.unique().tolist()\n for each in cols:\n temp_option=opt[((opt.entity==ent) & (opt.col_name==each)) | (opt.entity!=ent)]\n inter_options+=[temp_option]\n\n \n options=inter_options\n\n #for 2 columns found for same entity\n for ent, i in dic2.iteritems():\n inter_options=[]\n for opt in options:\n print (ent)\n cols=opt[opt.entity==ent].list.unique().tolist()\n for each in cols:\n temp_option=opt[((opt.entity==ent) & (opt.list==each)) | (opt.entity!=ent)]\n inter_options+=[temp_option]\n\n \n options=inter_options \n \n \n \n return options","sub_path":"entity_options.py","file_name":"entity_options.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"286047227","text":"#!/usr/bin/env /usr/bin/python\n\nimport re\nimport pages\nimport resources\nimport error_pages\n\ndef application(environ, start_response):\n \"\"\"\n The main function; all this does is route requests to the proper handler.\n Possibly too brittle for deployment.\n \"\"\"\n path = environ.get('PATH_INFO', '').lstrip('/')\n if 'QUERY_STRING' in environ and environ['QUERY_STRING']:\n path += '?' + environ['QUERY_STRING']\n for url, handler in URLS:\n match = re.search(url, path)\n if match:\n environ['args'] = match.group()\n return handler(environ, start_response)\n return error_pages.error404(environ, start_response)\n\nclass Error500Middleware(object):\n \"\"\"\n A middleware to catch 500 (internal server) errors and display a nice page.\n Adapted from http://lucumr.pocoo.org/2007/5/21/getting-started-with-wsgi/.\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n \"\"\"Call the application and catch exceptions.\"\"\"\n # just call the application and send the output back\n # unchanged but catch exceptions\n try:\n return self.app(environ, start_response)\n # if an exception occurs we return a 500 page\n except:\n return error_pages.error500(environ, start_response)\n\n # wsgi applications might have a close function. 
If it exists\n # it *must* be called.\n if hasattr(self.app, 'close'):\n self.app.close()\n\nclass StripTrailingURLMiddleware(object):\n \"\"\"\n \"\"\"\n def __init__(self, app):\n self.app = app\n def __call__(self, environ, start_response):\n if environ['PATH_INFO'][-1] == '/' and len(environ['PATH_INFO']) > 1: #Trailing slash\n environ['PATH_INFO'] = environ['PATH_INFO'][:-1]\n start_response('301 Redirect', [('Location', environ['PATH_INFO'])])\n return[]\n return self.app(environ,start_response)\n\n\n#application = StripTrailingURLMiddleware(application)\napplication = Error500Middleware(StripTrailingURLMiddleware(application))\n\nURLS = [\n (r'^$', pages.home),\n (r'^(?i)search/?$', pages.search),\n (r'^(?i)search\\?(.+)$', pages.results),\n (r'^(?i)resource/text/(.+)$', resources.resource_text),\n (r'^(?i)resource/image/(.+)$', resources.resource_image),\n (r'^(?i)resource/audio/(.+)$', resources.resource_audio),\n (r'^outside_resources/?$', pages.outside_resources),\n (r'^500/?$', error_pages.error500), #For testing\n (r'^(?i)resource/tei/(.+)$', resources.resource_tei),\n (r'^(?i)timeline/?$', pages.timeline),\n (r'^(?i)finding_aids/?$', pages.finding_aids), \n ]\n\n\nif __name__ == '__main__':\n from wsgiref.simple_server import make_server\n srv = make_server('localhost', 8000, application)\n srv.serve_forever()\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"344763874","text":"import os\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import RandomForestRegressor\n\nPATH = '~/Documents/research/datasets/'\nFILE = 'housing.csv'\n\n\ndef data_loader_csv(path=PATH, file=FILE):\n csv_path = os.path.join(path, file)\n dataset = pd.read_csv(csv_path)\n\n return dataset\n\n\ndef split_train_test(data, test_ratio):\n shuffled_indicies = np.random.permutation(len(data))\n test_set_size = int(test_ratio * len(data))\n test_indicies = shuffled_indicies[:test_set_size]\n train_indicies = shuffled_indicies[test_set_size:]\n X, y = data.iloc[train_indicies], data.iloc[test_indicies]\n\n return X, y\n\n\ndef custom_df_changes():\n housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)\n housing['income_cat'].where(housing['income_cat'] < 5, 5, inplace=True)\n\n return\n\n\ndef california_plot():\n housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4,\n s=housing['population'] / 100, label='population',\n figsize=(10, 5), c='median_house_value',\n cmap=plt.get_cmap('jet'), colorbar=True)\n plt.legend()\n plt.show()\n\n return\n\n\ndef splitter():\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=33)\n for train_idx, test_idx in 
split.split(housing, housing['income_cat']):\n strat_train, strat_test = housing.loc[train_idx], housing.loc[test_idx]\n\n return strat_train, strat_test\n\n\ndef fill_na_values(dataframe, metric='median'):\n imputer = SimpleImputer(strategy=metric)\n dataframe_numeric = dataframe.drop('ocean_proximity', axis=1)\n cleaned_array = imputer.fit_transform(dataframe_numeric)\n\n return pd.DataFrame(cleaned_array, columns=dataframe_numeric.columns)\n\n\ndef ordinal_encode_values():\n housing_cat = housing[['ocean_proximity']]\n ordinal_encoder = OrdinalEncoder()\n housing_cat_ordinal = ordinal_encoder.fit_transform(housing_cat)\n\n return housing_cat_ordinal\n\n\ndef one_hot_encode_values():\n housing_cat = housing[['ocean_proximity']]\n onehot_encoder = OneHotEncoder(sparse=False)\n housing_cat_onehot = onehot_encoder.fit_transform(housing_cat)\n\n return housing_cat_onehot\n\n\ndef get_col_ix(X, cols=None):\n all_cols = X.columns.values\n all_cols_dict = dict(zip(all_cols, range(len(all_cols))))\n subset_col_nums = []\n for col, num in all_cols_dict.items():\n if col in cols:\n subset_col_nums.append(num)\n\n return subset_col_nums\n\n\n# Assumed column positions of total_rooms, total_bedrooms, population and\n# households in the numeric feature matrix (standard housing.csv order).\nrooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6\n\n\nclass MyTransformer(BaseEstimator, TransformerMixin):\n\n def __init__(self, add_br_v_room=True):\n self.add_br_v_room = add_br_v_room\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n rooms_v_house = X[:, rooms_ix] / X[:, household_ix]\n pop_v_house = X[:, population_ix] / X[:, household_ix]\n\n if self.add_br_v_room:\n br_v_room = X[:, bedrooms_ix] / X[:, rooms_ix]\n return np.c_[X, rooms_v_house, pop_v_house, br_v_room]\n\n else:\n return np.c_[X, rooms_v_house, pop_v_house]\n\n\ndef add_extra_features(X, add_br_v_room=True):\n rooms_ix, bedrooms_ix, population_ix, household_ix = [\n list(housing.columns.values).index(col)\n for col in (\"total_rooms\", \"total_bedrooms\", \"population\", \"households\")]\n\n rooms_v_house = X[:, rooms_ix] / X[:, household_ix]\n pop_v_house = X[:, population_ix] / X[:, household_ix]\n\n if add_br_v_room:\n br_v_room = X[:, bedrooms_ix] / X[:, rooms_ix]\n return np.c_[X, rooms_v_house, pop_v_house, br_v_room]\n\n else:\n return np.c_[X, rooms_v_house, pop_v_house]\n\n\ndef display_scores(model, scores):\n print('\\n', model.upper(), \":\")\n print('Scores:\\t', scores)\n print('Mean:\\t', int(np.mean(scores)))\n print('St.Dev:\\t', int(np.std(scores)))\n\n\nif __name__ == '__main__':\n housing = data_loader_csv()\n\n X = housing.drop('median_house_value', axis=1)\n y = housing['median_house_value'].copy()\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=233)\n\n categorical_attributes = ['ocean_proximity']\n numerical_attributes = list(X_train.drop(categorical_attributes, axis=1))\n\n num_pipeline = Pipeline([\n ('imputer', SimpleImputer(strategy='median')),\n ('attr_adder', FunctionTransformer(add_extra_features, validate=False)),\n ('std_scaler', StandardScaler())\n ])\n\n full_pipeline = ColumnTransformer([\n ('numerical', num_pipeline, numerical_attributes),\n ('categorical', OneHotEncoder(), categorical_attributes),\n ])\n\n housing_prepared = full_pipeline.fit_transform(X_train)\n\n # Model Selection\n\n # Linear Model\n lin_reg = LinearRegression()\n lin_reg.fit(housing_prepared, y_train)\n preds = lin_reg.predict(housing_prepared)\n\n lin_rmse = np.sqrt(mean_squared_error(y_train, preds))\n lin_mae = mean_absolute_error(y_train, preds)\n\n lin_scores = cross_val_score(lin_reg, housing_prepared,\n y_train, scoring='neg_mean_squared_error', cv=10)\n\n 
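# note: scoring='neg_mean_squared_error' yields negative MSE values, which is\n # why the calls below take np.sqrt(-scores) to report a positive RMSE.\n 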
display_scores('Linear Model', np.sqrt(-lin_scores))\n\n # print(\"Linear RMSE:\\t\", int(lin_rmse))\n # print(\"Linear MAE:\\t\", int(lin_mae))\n\n # Decision Tree Regressor Model\n tree_reg = DecisionTreeRegressor(random_state=42)\n tree_reg.fit(housing_prepared, y_train)\n tree_scores = cross_val_score(tree_reg, housing_prepared,\n y_train, scoring='neg_mean_squared_error', cv=10)\n display_scores('Decision Tree', np.sqrt(-tree_scores))\n # preds = tree_reg.predict(housing_prepared)\n # tree_rmse = np.sqrt(mean_squared_error(preds, y_train))\n # tree_mae = mean_absolute_error(preds, y_train)\n # print(\"Tree MAE:\\t\", int(tree_mae))\n\n # Random Forest\n rf_reg = RandomForestRegressor(n_estimators=10, random_state=233)\n rf_reg.fit(housing_prepared, y_train)\n rf_reg_scores = cross_val_score(rf_reg, housing_prepared,\n y_train, scoring='neg_mean_squared_error', cv=10)\n display_scores('Random Forest', np.sqrt(-rf_reg_scores))\n","sub_path":"ca_housing.py","file_name":"ca_housing.py","file_ext":"py","file_size_in_byte":6812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"36305997","text":"from channels.generic.websocket import AsyncWebsocketConsumer\nfrom django.contrib.auth.models import User\nfrom .models import Comment\nimport json\n\nclass CommentConsumer(AsyncWebsocketConsumer):\n\n\n async def connect(self):\n self.task_id = self.scope['url_route']['kwargs']['task_id']\n self.task_group_name = 'comment_%s' % self.task_id\n\n await self.channel_layer.group_add(\n self.task_group_name,\n self.channel_name\n )\n\n await self.accept()\n\n\n async def disconnect(self, close_code):\n await self.channel_layer.group_discard(\n self.task_group_name,\n self.channel_name\n )\n\n\n async def receive(self, text_data):\n text_data_json = json.loads(text_data)\n type = text_data_json['type']\n\n if type == 'comment_message':\n message = text_data_json['message']\n user = text_data_json['user']\n task = self.task_id\n get_user = User.objects.get(id=int(user))\n\n #insertDB\n addComment = Comment(comment=message,task_id=task,user=get_user)\n addComment.save()\n\n await self.channel_layer.group_send(\n self.task_group_name,\n {\n 'type': type,\n 'message': message,\n 'user':get_user.username,\n 'comment_id':addComment.id\n }\n )\n\n elif type == 'delete_comment':\n comment_id = text_data_json['comment_id']\n\n #insertDB\n deleteComment = Comment(id=comment_id)\n deleteComment.delete()\n\n await self.channel_layer.group_send(\n self.task_group_name,\n {\n 'type': type,\n 'comment_id': comment_id\n }\n )\n elif type == 'edit_comment':\n comment_id = text_data_json['comment_id']\n edited_comment = text_data_json['edited_comment']\n\n #updateDB\n updateComment = Comment.objects.filter(id=comment_id).update(comment=edited_comment)\n\n await self.channel_layer.group_send(\n self.task_group_name,\n {\n 'type': type,\n 'comment_id': comment_id,\n 'comment':edited_comment\n }\n )\n\n\n async def comment_message(self, event):\n\n message = event['message']\n user = event[\"user\"]\n comment_id = event[\"comment_id\"]\n\n await self.send(text_data=json.dumps({\n 'type':'comment_message',\n 'message': message,\n 'comment_id':comment_id,\n 'user':user,\n }))\n\n async def delete_comment(self, event):\n comment_id = event[\"comment_id\"]\n\n await self.send(text_data=json.dumps({\n 'type':'delete_comment',\n 'comment_id':comment_id\n }))\n\n async def edit_comment(self, event):\n comment_id = event[\"comment_id\"]\n comment = event[\"comment\"]\n\n await 
self.send(text_data=json.dumps({\n 'type':'edit_comment',\n 'comment_id':comment_id,\n 'message':comment,\n }))\n","sub_path":"todolist/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"430632147","text":"import math\r\nfrom torch import nn\r\nfrom torch.autograd import Function\r\nimport torch\r\n\r\nimport gru_cpp\r\nimport pdb\r\n\r\ntorch.manual_seed(42)\r\n\r\nclass GRUFunction(Function):\r\n @staticmethod\r\n def forward(ctx, input, x2h_w, h2h_w, x2h_b, h2h_b, old_h):\r\n x = input.view(-1, input.size(1))\r\n outputs = gru_cpp.forward(x, x2h_w, h2h_w, x2h_b, h2h_b, old_h)\r\n new_h = outputs[0]\r\n variables = outputs[1:] + [old_h, x, x2h_w, h2h_w]\r\n ctx.save_for_backward(*variables)\r\n\r\n return new_h\r\n\r\n @staticmethod\r\n def backward(ctx, grad_hy):\r\n grad_input_weights, grad_hidden_weights, grad_input_bias, grad_hidden_bias, grad_hx = gru_cpp.backward(\r\n grad_hy, *ctx.saved_variables\r\n )\r\n return None, grad_input_weights, grad_hidden_weights, grad_input_bias, grad_hidden_bias, grad_hx\r\n\r\nclass GRUCell(nn.Module):\r\n def __init__(self, input_features, state_size):\r\n super(GRUCell, self).__init__()\r\n self.input_features = input_features\r\n self.state_size = state_size\r\n self.x2h_weights = nn.Parameter(torch.Tensor(3 * state_size, input_features))\r\n self.h2h_weights = nn.Parameter(torch.Tensor(3 * state_size, state_size))\r\n self.x2h_bias = nn.Parameter(torch.Tensor(1, 3 * state_size))\r\n self.h2h_bias = nn.Parameter(torch.Tensor(1, 3 * state_size))\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n std = 1.0 / math.sqrt(self.state_size)\r\n for w in self.parameters():\r\n w.data.uniform_(-std, std)\r\n\r\n def forward(self, input, state):\r\n input = input.view(-1, input.size(1))\r\n\r\n return GRUFunction.apply(\r\n input, \r\n self.x2h_weights, self.h2h_weights,\r\n self.x2h_bias, self.h2h_bias,\r\n state \r\n )","sub_path":"gru/cpp/gru.py","file_name":"gru.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"611726181","text":"# -*- encoding: utf-8 -*-\nimport time\nfrom purchase.report import order\nfrom purchase.report import request_quotation\nfrom openerp.report import report_sxw\n\n\nclass order(order.order):\n def __init__(self, cr, uid, name, context):\n super(order, self).__init__(cr, uid, name, context=context)\n\nreport_sxw.report_sxw('report.purchase.order.custom',\n 'purchase.order',\n 'addons/typ_purchase_reports/report/order.rml',\n parser=order,\n header='external')\n\n\nclass request_quotation(request_quotation.request_quotation):\n def __init__(self, cr, uid, name, context):\n super(request_quotation, self).__init__(cr, uid, name, context=context)\n self.localcontext.update({\n 'time': time,\n 'user': self.pool.get('res.users').browse(cr, uid, uid, context)\n })\n\nreport_sxw.report_sxw('report.purchase.quotation.custom',\n 'purchase.order',\n 'addons/typ_purchase_reports/report/request_quotation.rml',\n parser=request_quotation,\n header='external')\n","sub_path":"report/parsers/base_parser.py","file_name":"base_parser.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"157977714","text":"a, b = map(int, input().split())\n\nfor p in range(10**4):\n tax8 = p*0.08\n tax10 = p*0.1\n\n if 
int(tax8)==a and int(tax10)==b:\n print(p)\n exit()\n\nprint(-1)","sub_path":"atcoder/2020/ABC/0307_abc158/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"394239875","text":"import torch\nimport torch.nn as nn\n\nfrom .state import State\nfrom .base_rnn import BaseRNN\nfrom ..utils import sequence_mask\nfrom ..attention import Attention\nfrom ..base_module import BaseModule\n\n\n@BaseModule.register(\"rnn_decoder\")\nclass RNNDecoder(BaseRNN):\n def __init__(\n self,\n output_size,\n hidden_size,\n embedding,\n num_layers=1,\n cell_type=\"gru\",\n attn_mode=\"mlp\",\n dropout_prob=0.1,\n ):\n \"\"\"\n Args:\n\n output_size (int): The size of output feature,\n usually the vocabulary size.\n\n hidden_size (int): Number of rnn output features.\n\n\t\t\tembedding (nn.Embedding): Embedding layer.\n\n\t\t\tnum_layers (int): Number of recurrent layers.\n\n\t\t\tcell_type (str): Type of RNN cell, 'gru' or 'lstm'.\n\n attn_mode (str): Which attention score mechanism to use: 'dot', 'general' or 'mlp'.\n\n dropout_prob (float): The probability for Dropout.\n \"\"\"\n\n super(RNNDecoder, self).__init__(hidden_size, embedding, num_layers, cell_type)\n\n self.output_size = output_size\n self.attn_mode = attn_mode\n self.dropout_prob = dropout_prob\n\n self.attn = Attention(query_dim=self.hidden_size, mode=self.attn_mode)\n\n self.rnn = self.rnn(\n input_size=self.input_size + self.hidden_size,\n hidden_size=self.hidden_size,\n batch_first=True,\n num_layers=self.num_layers,\n )\n\n # output projection layer: project RNN outputs to output_size\n self.output_projection = nn.Sequential(\n nn.Dropout(self.dropout_prob),\n nn.Linear(self.hidden_size * 2, self.output_size),\n nn.LogSoftmax(dim=-1),\n )\n\n def decode_step(self, inputs, state, is_training=False):\n \"\"\" A decoding process for a time step.\n\n Args:\n inputs (torch.Tensor): Of shape (batch), RNN inputs.\n\n state (rnn.State): At least contains:\n - last_hidden (torch.Tensor): Of shape (num_layers, batch, hidden_size),\n containing the hidden state for t = seq_len from encoder.\n \n - context (torch.Tensor): Of shape (batch, seq_len, hidden_size),\n containing the output features (h_t) for each t from the encoder.\n\n is_training: (bool): Indicate that if is training.\n\n Returns:\n log_probs (torch.Tensor): Of shape (batch, 1, output_size)).\n\n state (rnn.State): At least contains:\n - last_hidden (torch.Tensor): Of shape (num_layers, batch, hidden_size).\n \n - outputs (torch.Tensor): Of shape (batch, 1, hidden_size * 2), RNN outputs.\n \"\"\"\n rnn_inputs = []\n outputs = []\n\n # 1. Embed the inputs\n inputs = self.embedding(inputs) # shape: (batch, embed_size)\n inputs = inputs.unsqueeze(1) # shape: (batch, 1, embed_size)\n\n rnn_inputs.append(inputs)\n\n # 2. Attend the context\n # use the hidden state of last layer as query\n query = state.last_hidden[-1].unsqueeze(1) # shape: (batch, 1, hidden_size)\n\n # shape: (batch, 1, hidden_size), (batch, 1, seq_len)\n weighted_context, attn_weights = self.attn(query=query, values=state.context)\n\n rnn_inputs.append(weighted_context)\n outputs.append(weighted_context)\n\n # 3. Input feed: concat the raw inputs and weighted contex\n # as actual RNN inputs.\n rnn_inputs = torch.cat(\n rnn_inputs, dim=-1\n ) # shape: (batch, 1, input_size + hidden_size)\n\n # 4. 
Feed to RNN\n\n if self.num_layers == 1:\n hidden = state.last_hidden[-1].unsqueeze(dim=0)\n else:\n hidden = state.last_hidden\n\n # TODO: LSTM need cell state\n if self.cell_type == \"lstm\":\n rnn_outputs, (last_hidden, _) = self.rnn(rnn_inputs, (hidden, hidden))\n else:\n rnn_outputs, last_hidden = self.rnn(rnn_inputs, hidden)\n\n outputs.append(rnn_outputs)\n outputs = torch.cat(outputs, dim=-1) # shape: (batch, 1, hidden_size * 2)\n\n # 5. Update state\n state.last_hidden = last_hidden\n\n if is_training:\n return outputs, state, attn_weights\n else:\n log_probs = self.output_projection(outputs)\n return log_probs, state, attn_weights\n\n def forward(self, inputs, state):\n \"\"\"\n Args:\n inputs (torch.Tensor): A tuple of \n - inputs (torch.Tensor): Of shape (batch, max_len)\n - lengths (torch.Tensor): Of shape (batch).\n\n state (rnn.State): At least contains:\n - last_hidden (torch.Tensor): Of shape (num_layers, batch, hidden_size),\n containing the hidden state for t = seq_len from encoder.\n \n - context (torch.Tensor): Of shape (batch, seq_len, hidden_size),\n containing the output features (h_t) for each t from the encoder.\n\n Returns:\n log_probs (torch.Tensor): Of shape (batch, max_len, output_size)).\n\n state (rnn.State): At least contains:\n - last_hidden (torch.Tensor): Of shape (num_layers, batch, hidden_size).\n \n - outputs (torch.Tensor): Of shape (batch, max_len, hidden_size * 2), RNN outputs.\n \"\"\"\n\n # 1. Extract input and lengths\n inputs, lengths = inputs\n batch, max_len = inputs.size()\n\n # 2. Initialize outputs tensor\n outputs = torch.zeros(batch, max_len, self.hidden_size * 2, dtype=torch.float)\n if torch.cuda.is_available():\n outputs = outputs.cuda()\n\n # 3. Prepare inputs for each time step\n # 3.1 Sort the sequences according to lengths.\n sorted_lengths, indices = lengths.sort(descending=True)\n\n # 3.2 Re-order the inputs\n inputs = inputs.index_select(dim=0, index=indices)\n\n # 3.3 Re-order the state\n state = state.index_select(indices)\n\n # 3.4 Compute the valid inputs for each time step\n # i.e filter out the padding elements.\n nums_valid = sequence_mask(sorted_lengths, max_len=max_len).int().sum(dim=0)\n\n # 4. decode for each time step\n for i, num_valid in enumerate(nums_valid):\n\n # 4.1 Prepare the step inputs\n # `num_valid` - minimize the batch\n # `i` - fetch the inputs for current time step\n step_inputs = inputs[:num_valid, i]\n\n # 4.2 Prepare the step state\n valid_state = state.slice_select(num_valid)\n\n # 4.3 Decode\n step_outputs, step_state, attn_weights = self.decode_step(\n step_inputs, valid_state, is_training=True\n )\n\n # 4.4 Update\n outputs[:num_valid, i] = step_outputs.squeeze(1)\n state.last_hidden[:, :num_valid] = step_state.last_hidden\n\n # 5. Revert the sort process\n _, reversed_indices = indices.sort()\n state = state.index_select(reversed_indices)\n outputs = outputs.index_select(dim=0, index=reversed_indices)\n\n # 6. 
Project outputs\n log_probs = self.output_projection(outputs)\n\n return log_probs, state\n","sub_path":"snow/modules/rnn/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":7311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"571809545","text":"# -*- coding: utf-8 -*-\n\nimport json\nfrom django_redis import get_redis_connection\n\n## 模板推送队列\ndef pushCheck(user_id, template_id):\n redis = get_redis_connection()\n p = redis.pipeline()\n p.rpush('template_check', template_id)\n p.lpush('edm_web_mail_template_point_queue', json.dumps({\n \"user_id\": user_id,\n 'template_id': template_id,\n }))\n p.execute()","sub_path":"edm_web1/app/template/utils/caches.py","file_name":"caches.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"7980845","text":"#!/usr/bin/env python3\n\nimport gym\n\n\nfrom garage.np.baselines import LinearFeatureBaseline\nfrom mylab.algos.go_explore import GoExplore\nfrom garage.tf.envs import TfEnv\nfrom mylab.envs.go_explore_atari_env import Pixel_GoExploreEnv, Ram_GoExploreEnv\nfrom mylab.policies.go_explore_policy import GoExplorePolicy\nfrom mylab.samplers.batch_sampler import BatchSampler\n\nimport fire\nimport os\nimport numpy as np\nfrom skimage.measure import block_reduce\nfrom garage.misc.overrides import overrides\nfrom garage.experiment import LocalRunner, run_experiment, SnapshotConfig\nfrom garage.np.baselines import LinearFeatureBaseline\nfrom garage.tf.algos import TRPO\nimport garage.tf.core.layers as L\nfrom garage.tf.envs import TfEnv\nfrom garage.tf.optimizers import ConjugateGradientOptimizer\nfrom garage.tf.optimizers import FiniteDifferenceHvp\nfrom garage.tf.policies import CategoricalLSTMPolicy\n\n# class Pixel_GoExploreEnv(GoExploreTfEnv):\n# @overrides\n# def downsample(self, obs):\n# # import pdb; pdb.set_trace()\n# obs = np.dot(obs[..., :3], [0.299, 0.587, 0.114])\n# obs = block_reduce(obs, block_size=(20, 20), func=np.mean)\n# obs= obs.astype(np.uint8) // 32\n# return obs.flatten()\n#\n# class Ram_GoExploreEnv(GoExploreTfEnv):\n# @overrides\n# def downsample(self, obs):\n# # import pdb; pdb.set_trace()\n# return obs // 32\n\ndef runner(exp_name='montezuma',\n use_ram=False,\n db_filename='/home/mkoren/Scratch/cellpool-shelf.dat',\n max_db_size=150,\n overwrite_db=True,\n n_parallel=2,\n snapshot_mode='last',\n snapshot_gap=1,\n log_dir=None,\n max_path_length=100,\n discount=0.99,\n n_itr=100,\n max_kl_step=0.01):\n\n if overwrite_db and os.path.exists(db_filename):\n os.remove(db_filename)\n\n batch_size = max_path_length * n_parallel\n\n def run_task(snapshot_config, *_):\n with LocalRunner(snapshot_config=snapshot_config) as runner:\n # gym_env=gym.make('MontezumaRevenge-ram-v0')\n if use_ram:\n # gym_env = gym.make('MontezumaRevenge-ram-v0')\n # import pdb; pdb.set_trace()\n env = Ram_GoExploreEnv(env_name='MontezumaRevenge-ram-v0')\n # env = GoExploreTfEnv(env=gym_env)\n # pool=CellPool())\n # setattr(env, 'downsampler', ram_downsampler)\n else:\n # gym_env = gym.make('MontezumaRevenge-v0')\n # import pdb; pdb.set_trace()\n env = Pixel_GoExploreEnv(env_name='MontezumaRevenge-v0')\n # env = GoExploreTfEnv(env=gym_env)\n # # pool=CellPool())\n # setattr(env, 'downsampler',pixel_downsampler)\n\n policy = GoExplorePolicy(\n env_spec=env.spec)\n\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n\n\n\n algo = GoExplore(\n db_filename=db_filename,\n 
max_db_size=max_db_size,\n env=env,\n env_spec=env.spec,\n policy=policy,\n baseline=baseline,\n max_path_length=max_path_length,\n discount=discount,\n )\n # algo.train()\n #setup(self, algo, env, sampler_cls=None, sampler_args=None):\n sampler_cls = BatchSampler\n sampler_args = {'n_envs': n_parallel}\n\n runner.setup(algo=algo,\n env=env,\n sampler_cls=sampler_cls,\n sampler_args=sampler_args)\n runner.train(n_epochs=n_itr, batch_size=batch_size)\n\n # runner.setup(algo, env, sampler_args={'n_envs': 1})\n # runner.train(n_epochs=120, batch_size=5000,store_paths=False)\n\n\n run_experiment(\n run_task,\n snapshot_mode=snapshot_mode,\n log_dir=log_dir,\n exp_name=exp_name,\n snapshot_gap=snapshot_gap,\n seed=1,\n n_parallel=n_parallel,\n )\n\n\nif __name__ == '__main__':\n fire.Fire()\n","sub_path":"TestCases/Montezuma/go_explore_tf_montezuma.py","file_name":"go_explore_tf_montezuma.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"421637855","text":"from manticore.ethereum import ManticoreEVM\n\nETHER = 10**18\n\nm = ManticoreEVM()\n\nuser_account = m.create_account(balance=1000*ETHER)\nprint(\"[+] Creating a user account\", user_account)\n\nwith open('test.sol') as f:\n contract_account = m.solidity_create_contract(f, owner=user_account)\nprint(\"[+] Creating a contract account\", contract_account)\n\nprint(\"[+] Now the symbolic values\")\nsymbolic_value = m.make_symbolic_value(name=\"value\")\ncontract_account.solve(symbolic_value, caller=user_account)\n\nprint(\"[+] Resulting balances are:\")\nfor state in m.all_states:\n balance = state.platform.get_balance(int(user_account))\n print(state.solve_one(balance))\n\nm.finalize()\nprint(f\"[+] Look for results in {m.workspace}\")\n","sub_path":"4/complete.py","file_name":"complete.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"174346381","text":"import os.path\n\nimport click\nimport pandas\nimport sqlalchemy\nimport sqlalchemy.orm\n\nimport skopy.command\nimport skopy.feature\nimport skopy.task\n\n\n@click.command(\"measure\")\n@click.argument(\"metadata\", nargs=1, type=click.Path(exists=True))\n@click.option(\"--database\", default=\"sqlite:///measurements.sqlite\")\n@click.option(\"--distribute\", is_flag=True)\n@click.option(\"--verbose\", is_flag=True)\ndef command(metadata, database, distribute, verbose):\n if distribute:\n pass\n else:\n engine = sqlalchemy.create_engine(database, echo=verbose)\n\n skopy.feature.Base.metadata.drop_all(engine)\n\n skopy.feature.Base.metadata.create_all(engine)\n\n session = sqlalchemy.orm.sessionmaker()\n\n session.configure(bind=engine)\n\n session = session()\n\n directory = os.path.dirname(metadata)\n\n directory = os.path.abspath(directory)\n\n metadata = pandas.read_csv(metadata)\n\n progress = click.progressbar([(pathname, mask) for _, pathname, mask in metadata.itertuples()])\n\n with progress as metadata:\n for pathname, mask in metadata:\n pathname = os.path.join(directory, pathname)\n\n mask = os.path.join(directory, mask)\n\n if distribute:\n skopy.task.measure.delay(database, pathname, mask)\n else:\n image = skopy.feature.extract(pathname, mask)\n\n session.add(image)\n\n 
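# note: committing once per file persists each image's features\n # immediately, at the cost of one transaction per input\n 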
session.commit()\n","sub_path":"skopy/commands/command_measure.py","file_name":"command_measure.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"1914799","text":"import requests, time, os, pickle\r\nimport pandas as pd\r\n\r\n\r\ndef get_heros(load=True):\r\n\r\n path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"hero_list.pickle\")\r\n\r\n if load:\r\n\r\n with open(path, \"rb\") as file:\r\n\r\n data = pickle.load(file)\r\n\r\n return data\r\n\r\n data = requests.get(\"https://api.opendota.com/api/heroes\").json()\r\n data = pd.DataFrame(data)\r\n\r\n # Saving data section\r\n while True:\r\n\r\n response = input(\"Do you want to save the data locally? (Y/N)\")\r\n\r\n if response in [\"Y\", \"y\"]:\r\n\r\n with open(path, \"wb\") as file:\r\n\r\n pickle.dump(data, file)\r\n\r\n print(\"Saved to: {}\".format(path))\r\n\r\n return data\r\n\r\n elif response in [\"N\", \"n\"]:\r\n\r\n return data\r\n\r\n\r\ndef recent_hero_matches(hero_id_list, load=True):\r\n\r\n # A method which will retrieve the recent match data for all heros, \r\n # this will happen ~ once a day\r\n\r\n raw_data = dict()\r\n\r\n path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"recent_hero_matches.pickle\")\r\n\r\n # Try to obtain the data locally if the save path is supplied\r\n\r\n if load:\r\n\r\n with open(path, \"rb\") as file:\r\n\r\n raw_data = pickle.load(file)\r\n\r\n return raw_data\r\n\r\n for hero_id in hero_id_list:\r\n\r\n if hero_id % 10 == 0:\r\n\r\n print(\"Hero no: \", hero_id)\r\n\r\n try: raw_data[hero_id] = get_recent_win_rate(pd.DataFrame(requests.get(\"https://api.opendota.com/api/heroes/{}/matches\".format(hero_id)).json()))\r\n\r\n except:\r\n\r\n # Continue incase the recent win rate for a hero cannot be obtained\r\n continue\r\n\r\n # Wait a second due to API restraints\r\n time.sleep(1)\r\n\r\n # Saving data section\r\n while True:\r\n\r\n response = input(\"Do you want to save the data locally? (Y/N)\")\r\n\r\n if response in [\"Y\", \"y\"]:\r\n\r\n with open(path, \"wb\") as file:\r\n\r\n pickle.dump(raw_data, file)\r\n\r\n print(\"Saved to: {}\".format(path))\r\n\r\n return raw_data\r\n\r\n elif response in [\"N\", \"n\"]:\r\n\r\n return raw_data\r\n\r\n\r\ndef get_recent_hero_stats(hero_n, heroStats):\r\n # Given the downloaded dataset and a specified list of heros, return the statistics which are relevant to that hero.\r\n \r\n return heroStats.loc[hero_n]\r\n\r\n\r\ndef get_recent_win_rate(recent_matches):\r\n\r\n radiant_wins = recent_matches.loc[(recent_matches[\"radiant\"] == True) & (recent_matches[\"radiant_win\"] == True)].shape[0]\r\n blight_wins = recent_matches.loc[(recent_matches[\"radiant\"] == False) & (recent_matches[\"radiant_win\"] == False)].shape[0]\r\n\r\n return (radiant_wins + blight_wins)/recent_matches.shape[0]\r\n\r\n\r\ndef get_hero_stats(load=True):\r\n\r\n path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"hero_stats.pickle\")\r\n\r\n if load:\r\n\r\n with open(path, \"rb\") as file:\r\n\r\n heroStats = pickle.load(file)\r\n\r\n return heroStats\r\n\r\n heroStats = pd.DataFrame(requests.get(\"https://api.opendota.com/api/heroStats\").json())\r\n \r\n # Saving data section\r\n while True:\r\n\r\n response = input(\"Do you want to save the data locally? 
(Y/N)\")\r\n\r\n if response in [\"Y\", \"y\"]:\r\n\r\n with open(path, \"wb\") as file:\r\n\r\n pickle.dump(heroStats, file)\r\n\r\n print(\"Saved to: {}\".format(path))\r\n\r\n return heroStats\r\n\r\n elif response in [\"N\", \"n\"]:\r\n\r\n return heroStats","sub_path":"Global_Hero_Methods.py","file_name":"Global_Hero_Methods.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"540070949","text":"import re\n\n\nfrom src.classes.Cables_ import Cables\nfrom src.classes.constant_ import Constants\nfrom src.classes.device_ import Device\nimport xmltodict\nimport logging\n\n\n\nclass Linux_Host(Device):\n def __init__(self, device_ip, device_name, device_type,linux_device,owner,group_name):\n if device_type == 'GEN4':\n super().__init__(device_ip, device_name, device_type, 'root', 'UFMcyberAI', linux_device, owner,group_name)\n elif device_type == 'GEN3':\n super().__init__(device_ip, device_name, device_type, 'root', 'UFMappliance', linux_device, owner,group_name)\n else:\n super().__init__(device_ip,device_name,device_type,'root','3tango',linux_device,owner,group_name)\n self.ilo_ip = None\n self.ilo_works = None\n self.ports = ['n/a','n/a','n/a','n/a']\n self.memory = 'n/a'\n self.ofed = 'n/a'\n self.os_version = 'n/a'\n self.dmidecode = 'n/a'\n self.kernel = 'n/a'\n self.ufm_chassis_health_docker_id = 'n/a'\n #start collecting information\n self.get_ilo_ip()\n self.check_ilo_works()\n if self.ssh_client:\n self.get_all_properties()\n logging.debug(\"finish building linux host class for \" + device_name)\n\n def get_kernel_version(self):\n logging.debug('Start get kernel version')\n try:\n cmd = r'''uname -r'''\n out = super().run_command(cmd)\n super().dump_file('get_kernel_version', out, Constants.root_servers)\n\n except Exception as e:\n logging.error('Exception in get kernel version: ' + str(e))\n logging.debug('End get kernel version')\n\n def get_all_properties(self):\n\n #if we don't have shell to host we can skip the function below:\n if self.ssh_client:\n self.get_hw_address()\n self.get_ports()\n self.get_memory()\n self.get_ofed()\n self.get_os_version()\n self.get_dmidecode()\n self.lshca()\n self.getServerModelandType()\n self.get_kernel_version()\n self.check_if_gpu_exist()\n if self.check_if_ufm_host():\n self.get_info_of_ufm_mode()\n self.save_ufm_data()\n self.check_if_ufm_is_running()\n if self.is_ufm_host_is_running and \\\n self.device_type != 'GEN2' and self.device_type != 'GEN3' and self.device_type != 'GEN4':\n self.Cables_obj = Cables(self.device_name,self.owner)\n\n\n def save_ufm_data(self):\n super().save_ufm_data()\n\n def get_list_of_dockers(self):\n\n try:\n logging.debug(f\"Trying to get container id of ufm-chassis-health docker on : {self.device_name}\")\n cmd =r\"\"\"sudo docker ps --filter \"mellanox/ufm-enterprise --format \"{{.ID}}\"\n \"\"\"\n output = str(self.run_command(cmd)).splitlines()[-1]\n if output:\n self.ufm_enterprise_docker_id = output\n except Exception as e:\n logging.error(f'Exception was catched in get list on docker for {self.device_name} : {str(e)}')\n\n def check_if_ufm_host(self):\n try:\n logging.debug(f'make sure one of the file exist in the host to determine ufm host')\n files = [f'/opt/ufm/version/release', \\\n f'/opt/ufm/chassis_health/ch-release', \\\n f'/opt/ha_data/ufm-enterprise/files/ufm_version']\n\n for f in files:\n cmd = f'ls {f}'\n output = self.run_command(cmd)\n if not 'No such' in output and output != \"\":\n 
logging.debug(f'{self.device_name} is ufm host')\n self.is_ufm_host = True\n self.get_ufm_version()\n return True\n else:\n logging.debug(f'{self.device_name} is not ufm host')\n self.is_ufm_host = False\n return False\n except Exception as e:\n logging.error(f'Execption in check if ufm host appears on {self.device_name} : {str(e)}')\n\n def check_if_ufm_is_running(self):\n proccess = ['opensm','ModelMain.pyc']\n logging.debug(f\"checking if {self.device_name} has ufm running \")\n for p in proccess:\n\n cmd = f\"ps -ef | grep {p}\"\n output = self.run_command(cmd=cmd, remove_asci='yes', )\n if re.findall(f\"\\Sopt\\S.*\\S{p}\", output):\n logging.debug(f\"server : {self.device_name} is running {p} \")\n else:\n logging.debug(f\"server : {self.device_name} is not running {p}\")\n self.is_ufm_host_is_running = False\n return\n\n logging.debug(f'ufm is running on : {self.device_name}')\n self.is_ufm_host_is_running = True\n\n\n\n def check_if_gpu_exist(self):\n try:\n logging.debug(f\"check if {self.device_name} has GPU:\")\n cmd = f\"nvidia-smi -q\"\n out = self.run_command(cmd)\n if 'no nvidia-smi' in out or not out or 'command not found' in out or 'but can be installed' in out:\n logging.debug(f\" {self.device_name} has no GPUs installed :\")\n elif f'''NVIDIA-SMI has failed because it couldn't communicate with the NVIDIA driver''' in out:\n logging.critical(f'Nvidia latest driver is not installed on {self.device_name}. Make sure to update the driver')\n else:\n logging.debug(f\" {self.device_name} has GPUs installed :\")\n try:\n #cmd = f\"nvidia-smi -q -x\"\n cmd = f\"nvidia-smi --query-gpu=gpu_name,count --format=csv | uniq | tail -n +2\"\n out_csv = super().run_command(cmd)\n logging.debug(f\"command has run succussfully : {cmd} on {self.device_name}\")\n data_dict = Linux_Host.parse_csv_to_json(out_csv)\n #data_dict = self.xml_to_json(out_xml=out)\n super().save_gpu_version(data_dict)\n except Exception as e:\n logging.error(f\"Exception while running {cmd} : {str(e)}\")\n except Exception as e:\n logging.error(f\"Exception occured in get ufm version for {self.device_name}: {str(e)}\")\n\n @staticmethod\n def parse_csv_to_json(out_csv):\n logging.debug(f'trying to parse csv to json')\n try:\n dict = {}\n lines = str(out_csv).split(',')\n dict['GPU_Name'] = lines[0]\n dict['GPU_Count'] = lines[1]\n logging.debug(f'parsing csv to json finished succussfully')\n return dict\n except Exception as e:\n logging.error(f'Exception receivced in parsing csv to json : {str(e)}')\n def xml_to_json(self,out_xml):\n logging.debug(f\"convert xml file into dictionary\")\n try:\n my_dict = xmltodict.parse(out_xml)\n logging.debug(f\"finished convert xml file into dictionary\")\n return my_dict\n except Exception as e:\n logging.error(f\"Exception received in xml_to_json on device {self.device_name}\")\n\n\n def get_ufm_version(self):\n try:\n if self.device_type == 'GEN3' or self.device_type == 'GEN4':\n if self.device_type == 'GEN3':\n files = [f'/opt/ufm/chassis_health/ch-release','/opt/ha_data/ufm-enterprise/files/ufm_version']\n else:\n #GEN4\n files = [f'/opt/ha_data/ufm-enterprise/files/ufm_version' \\\n , r\"\"\"/etc/ufm-release | grep -oP '(?<=UFM_CYBERAI_APPLIANCE=)[0-9]+\\.[0-9]+\\.[0-9]+-[0-9]+' | cut -d'=' -f2\"\"\"]\n final_version =''\n for file_path in files:\n cmd = f'sudo cat {file_path}'\n out = super().run_command(cmd)\n if 'No such file or directory' in out:\n self.ufm_version = 'Not Installed'\n logging.debug(f'UFM is not installed on : {self.device_name}')\n return\n 
final_version += out + ' - '\n final_version = final_version[:-2]\n self.ufm_version = final_version\n logging.debug(f\"found ufm version for {self.device_name} : {self.ufm_version}\")\n return\n else:\n cmd = f\" cat /opt/ufm/version/release \"\n out = super().run_command(cmd)\n if out:\n self.ufm_version = out\n self.ufm_version = str(self.ufm_version.replace(\"build\", '.')).replace(\" \", \"\")\n logging.debug(f\"found ufm version for {self.device_name} : {self.ufm_version}\")\n else:\n logging.debug(f\"didn't find ufm version for {self.device_name}\")\n except Exception as e:\n logging.error(f\"Exception occured in get ufm version for {self.device_name}: {str(e)}\")\n\n def getModel(self):\n logging.info('Starting Get Model function for device : ' + str(self.device_name))\n try:\n cmd = r'''dmidecode | grep -A3 '^System Information' | grep Product | cut -d ':' -f 2'''\n out = super().run_command(cmd)\n super().dump_file('product_model', out, Constants.root_servers)\n\n except Exception as e:\n logging.error('Exception in get model function : ' + str(e))\n\n def getManufacture(self):\n logging.info('Starting Get Manufacture function for device : ' + str(self.device_name))\n try:\n cmd = r'''dmidecode | grep -A3 '^System Information' | grep Manufacture | cut -d ':' -f 2'''\n out = super().run_command(cmd)\n super().dump_file('manufacture', out, Constants.root_servers)\n\n except Exception as e:\n logging.error('Exception in manufacture function : ' + str(e))\n\n def getServerModelandType(self):\n self.getManufacture()\n self.getModel()\n\n def lshca(self):\n logging.info('Starting lshca function for device : ' + str(self.device_name))\n try:\n tools = ['python','/hpc/local/bin/lshca']\n flag = True\n cmd = '/hpc/local/bin/lshca -m normal -j -w roce'\n for tool in tools:\n if not self.is_tool_installed(tool):\n logging.debug(f'tool {tool} does not install on : {self.device_name}')\n flag = False\n break\n if not flag:\n logging.debug('tool does not install on : ' +self.device_name)\n else:\n out = super().run_command(cmd)\n if out:\n super().dump_file('lshca', out, Constants.root_hcas)\n else:\n logging.critical(f\"lshca return empty for : {str(self.device_name)}\")\n\n except Exception as e:\n logging.error('Exception in lshca function : ' + str(e))\n\n def get_all_values(self):\n #Owner,Device Name,Device_type, MGMT_ip, MGMT Ping, ilo IP, ilo ping. 
HW address, CA Type#1, CA Type #2, CA Type#3, CA Type#4, Total Memory, OFED Version, OS Version, dmidecode\n return self.owner,self.group_name,self.device_name, self.device_type, self.ip, self.ip_reply, self.ilo_ip, self.ilo_works, self.hw_address,\\\n self.ports[0],self.ports[1],self.ports[2],self.ports[3], self.memory, self.ofed, self.os_version, self.dmidecode\n \n def get_dmidecode(self):\n self.dmidecode = super().get_dmidecode()\n\n def get_os_version(self):\n logging.debug(\"trying to get os version for : \" + self.device_name)\n cmd = 'cat /etc/*-re*'\n out = super().run_command(cmd)\n if out:\n rows = out.splitlines()\n for row in rows:\n if 'PRETTY_NAME=' in row:\n os = row.split('=')[1].replace(\"\\\"\",'')\n logging.debug(\"os version for device : \" + self.device_name + \" is \" + str(os))\n self.os_version = os\n break\n else:\n logging.critical(\"Couldn't find os version for \" + self.device_name)\n\n def get_ofed(self):\n self.ofed = super().get_ofed_version()\n\n\n\n\n def get_memory(self):\n self.memory = super().find_total_memory()\n\n def get_ports(self):\n self.ports = super().get_ports()\n \n def get_hw_address(self):\n super().set_hw_address()\n\n def check_ilo_works(self):\n if super().ping_device_pyping(self.ilo_ip):\n self.ilo_works = 'Yes'\n else:\n self.ilo_works = 'No'\n\n\n\n\n def get_ilo_ip(self):\n self.ilo_ip = super().get_device_ilo()\n","sub_path":"src/classes/linux_host_.py","file_name":"linux_host_.py","file_ext":"py","file_size_in_byte":12725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"432906106","text":"#\n# @lc app=leetcode.cn id=107 lang=python3\n#\n# [107] Binary Tree Level Order Traversal II\n#\n# https://leetcode-cn.com/problems/binary-tree-level-order-traversal-ii/description/\n#\n# algorithms\n# Easy (63.53%)\n# Likes: 172\n# Dislikes: 0\n# Total Accepted: 36.5K\n# Total Submissions: 57.3K\n# Testcase Example: '[3,9,20,null,null,15,7]'\n#\n# Given a binary tree, return the bottom-up level order traversal of its\n# nodes' values (i.e. from left to right, level by level, from the leaf\n# level up to the root level).\n# \n# For example:\n# Given the binary tree [3,9,20,null,null,15,7],\n# \n# ⁠ 3\n# ⁠ / \\\n# ⁠ 9 20\n# ⁠ / \\\n# ⁠ 15 7\n# \n# \n# return its bottom-up level order traversal as:\n# \n# [\n# ⁠ [15,7],\n# ⁠ [9,20],\n# ⁠ [3]\n# ]\n# \n# \n#\n\n# @lc code=start\n# Definition for a binary tree node.\nimport queue\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def levelOrderBottom(self, root: TreeNode) -> [[int]]:\n if not root: return []\n q,ans = queue.Queue(),[]\n q.put((root,0))\n while not q.empty():\n tmp = q.get()\n node,curLevel = tmp[0],tmp[1]\n if curLevel > len(ans)-1:\n ans.append([node.val])\n else:\n ans[curLevel].append(node.val)\n if node.left:\n q.put((node.left,curLevel+1))\n if node.right:\n q.put((node.right,curLevel+1))\n return ans[::-1]\n\ndef deserializeTree(nums: [int]) -> TreeNode:\n if len(nums) <= 0:\n return None\n root = TreeNode(nums.pop(0))\n if root == None: return root\n Q = [root]\n while len(nums):\n Q[0].left = TreeNode(nums.pop(0))\n if Q[0].left != None: Q.append(Q[0].left)\n \n if len(nums) == 0:\n break\n Q[0].right = TreeNode(nums.pop(0))\n if Q[0].right != None: Q.append(Q[0].right)\n Q.pop(0)\n return root\nif __name__ == \"__main__\":\n nums = [5,4,5,1,1,5]\n a = deserializeTree(nums)\n print(Solution().levelOrderBottom(a))\n \n\n# @lc code=end\n\n","sub_path":"107.二叉树的层次遍历-ii.py","file_name":"107.二叉树的层次遍历-ii.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"396527971","text":"#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import PoseStamped, TwistStamped, Twist\nfrom sensor_msgs.msg import NavSatFix \n#from mavros_msgs.srv import CommandBool, SetMode\nfrom mavros_msgs.msg import State \nfrom mavros_msgs.srv import *\n\n#Initializing with opposite values\nconnected = False\narmed = False\nmode = \"GUIDED\" \n\n#Global pos variables initialization\nlatitude =0.0\nlongitude=0.0\naltitude =0.0\n#Global velocity variables initialization\nvel_x = 0.0\nvel_y = 0.0\nvel_z = 0.0\n#Global stable flight start time object\nstable_start_time = rospy.Time()\n#Flag for tasks\ntask_1 = False\ntask_2 = False\n\n#Current state of vehicle\ndef state_cb(msg):\n\tglobal connected\n\tglobal armed\n\tglobal mode\n\tconnected = msg.connected\n\tarmed = msg.armed\n\tmode = msg.mode\t\n\n#GPS position fix reported by the device.\ndef globalPositionCallback(globalPositionCallback):\n global latitude\n global longitude\n global altitude\n latitude = globalPositionCallback.latitude\n longitude = globalPositionCallback.longitude\n altitude = globalPositionCallback.altitude\n\n#Global Velocity fused by FCU\ndef globalvel(val):\n global vel_x \n global vel_y\n global vel_z\n vel_x = val.twist.linear.x\n vel_y = val.twist.linear.y\n vel_z = val.twist.linear.z\n\n#Disable pos signal after\ndef trigger(home_alt, task, time, target):\t\t\n\tglobal stable_start_time\n\n\trospy.loginfo('Duration = '+ str((rospy.Time.now() - stable_start_time).secs) +'Height = '+ str(altitude - home_alt))\t\n\n\t#recording stable flight time\t \n\tif(altitude - home_alt >= target or task ):\n\t\tif(rospy.Time.now() - stable_start_time > rospy.Duration(time) or task):#complete 10 sec stable flight at 2 m\n\t\t\trospy.loginfo('*****Task Completed*******'+'Duration = '+str((rospy.Time.now() - stable_start_time).secs) +'Home_alt = '+ str(home_alt))\n\t\t\ttask = True\t\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True #keep publishing\n\telse:\t\t\n\t\tduration = rospy.Time() #set to time = 0 if unstable from pos \n\t\tstable_start_time = rospy.Time.now()\t\t\t\n\t\treturn True \t\t#Task not achieved\n\n#Main program-----------------------------------\nif __name__ == '__main__':\n\trospy.init_node('offboard_node', anonymous=True)\n\n\trospy.Subscriber('/uav2/mavros/state', State, state_cb) #State - connection/arming/mode info\n\n\tlocal_pos_pub = rospy.Publisher('/uav2/mavros/setpoint_position/local', PoseStamped, queue_size = 10) #To give position\n\n\tset_mode_client = rospy.ServiceProxy('/uav2/mavros/set_mode', mavros_msgs.srv.SetMode)\t#Setting Mode\n\n\tarming_client = rospy.ServiceProxy('/uav2/mavros/cmd/arming', mavros_msgs.srv.CommandBool)\t#Arming\n\n\trospy.Subscriber(\"/uav2/mavros/global_position/raw/fix\", NavSatFix, globalPositionCallback) #Getting global gps pos\n\n\trospy.Subscriber(\"/uav2/mavros/local_position/velocity\", TwistStamped, globalvel) #local velocity show more precise values\t\n\n\t#Setting rate for repetition / sleep time\n\trate = rospy.Rate(20)\t\n\t\t\n\t#Verifying connection between MAVlink and autopilot\n\ttry:\n\t\twhile((not rospy.is_shutdown()) and (not connected)): \n\t\t\tprint('---Waiting for connection---')\n\t\t\trate.sleep() \n\texcept rospy.ROSInterruptException:\n\t\tpass\t\n\n\trospy.loginfo('Connection established')\n\t\n\t#Setting next coordinates\n\tpose = PoseStamped()\n\tpose.pose.position.x = 2\n\tpose.pose.position.y = 0\n\tpose.pose.position.z = 2\n\n\tpose_1 = 
PoseStamped()\n\tpose_1.pose.position.x = 2\n\tpose_1.pose.position.y = 4\n\tpose_1.pose.position.z = 4\n\n\t#Continuously publishing coordinates before flight\n\ttry:\n\t\tfor i in range(1,101):\n\t\t\tlocal_pos_pub.publish(pose)\n\t\t\tprint('Publishing position coordinates')\n\t\t\trate.sleep() \n\texcept rospy.ROSInterruptException:\n\t\tpass\t\n\n\tlast_request = rospy.Time.now() #Taking a record of time\n\n\ta = True\n\n\t#Main loop--------------------------\n\twhile not rospy.is_shutdown():\n\t\tif (mode != \"OFFBOARD\") and (rospy.Time.now() - last_request > rospy.Duration(5.0)): #Set mode to OFFBOARD if not\n\t\t\tprint('Setting mode')\n\t\t\tif set_mode_client(custom_mode = \"OFFBOARD\"):\n\t\t\t\trospy.loginfo('OFFBOARD enabled')\n\t\t\tlast_request = rospy.Time.now()\n\t\telse:\n\t\t\tif (not armed) and (rospy.Time.now() - last_request > rospy.Duration(5.0)): #ARM the vehicle if not \n\t\t\t\tprint('Arming vehicle')\n\t\t\t\tif arming_client(True).success:\n\t\t\t\t\trospy.loginfo('Vehicle armed')\n\t\t\t#last_request = rospy.Time.now()\n\t\t\t\t\n\t\t#Record of home altitude\n\t\t\n\t\tif(a):\n\t\t\thome_alt = altitude\n\t\t\ta = False\n\t\t\n\t\t#publishing pos in loop for continuously maintaining pos\n\t\tif(trigger(home_alt, task_1, 10.0, 1.8)):\n\t\t\tlocal_pos_pub.publish(pose)\n\t\t\tprint('POS PUBLISHED X=2, Z = 2')\n\t\telse:\n\t\t\ttask_1 = True\n\t\t\tif(trigger(home_alt, task_2, 15.0, 3.8)):\n\t\t\t\tlocal_pos_pub.publish(pose_1)\n\t\t\t\tprint('POS PUBLISHED y=4, Z = 4')\n\t\t\n\t\t\n\t\t\n\t\t#printing global position\n\t\trospy.loginfo(' Lon = '+ str(longitude) + ' Lat = ' +str(latitude) + ' Alt = ' + str(altitude))\n\t\tprint('---------------------------------------------------------------------------------')\n\t\t#rospy.loginfo(' Velocity X = '+ str(vel_x) + ' Y = ' +str(vel_y) + ' Z = ' + str(vel_z))\n\t\t\n\t\t#Sleep for 60/20 = 3s\n\t\ttry:\n\t\t\trate.sleep()\n\t\texcept rospy.ROSInterruptException:\n\t\t\tpass\t\t\n","sub_path":"..Constant Distance from other UAV (exercise code)/Master_UAV.py","file_name":"Master_UAV.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"587774747","text":"'''\nJinjer Url Path\n'''\nfrom django.urls import path\nfrom django.conf.urls import include, url\n\nfrom rest_framework import routers\n\nfrom .views import MainListViewSet, SubListViewSet, CheckInViewSet, CheckOutViewSet, index\n\nrouter = routers.SimpleRouter(trailing_slash=False)\nrouter.register(r'mainlist', MainListViewSet)\nrouter.register(r'sublist', SubListViewSet)\nrouter.register(r'checkin', CheckInViewSet)\nrouter.register(r'checkout', CheckOutViewSet)\n\napp_name = \"jinjer\"\nurlpatterns = [\n path('', index),\n url(r'^', include(router.urls)),\n]\n","sub_path":"app/jinjer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"247164502","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport os\nimport time\nimport random\nimport pickle\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nimport models\nfrom sys import stdout\n# from IPython import embed\n# from collections import Counter\n# from gensim.models import Word2Vec\nfrom tqdm import tqdm\n\nfrom load import load_data, load_test_data\nfrom config import rep_len, rec_len, RATE\n\nvisualize = False\ndevice = \"cuda\" if torch.cuda.is_available else 
\"cpu\"\n\ndef calculateRecall(dataset, at=10):\n datas = dataset['valid']\n # model.eval()\n recall10, recall5 = [], []\n print(\"> Calculating Recall ...\")\n for cou, data in enumerate(datas):\n input_data_rec, input_data_rep = zip(*data)\n input_data_rec = torch.tensor(input_data_rec, dtype=torch.long)\n input_data_rep = torch.tensor(input_data_rep, dtype=torch.long)\n\n input_data_rec = input_data_rec.to(device)\n input_data_rep = input_data_rep.to(device)\n\n pred = model(input_data_rec, input_data_rep)\n\n if args.attn == 1 or args.attn == 3:\n pred = pred[0]\n pred = nn.Sigmoid()(pred)\n out = pred.detach().cpu().numpy().argsort()[-at:][::-1].tolist()\n\n if 0 in out: recall10.append(1)\n else: recall10.append(0)\n if 0 in out[:5]: recall5.append(1)\n else: recall5.append(0)\n\n return np.mean(recall10), np.mean(recall5)\n\n\ndef data_generator(args, data, batch_size_origin, shuffle=True):\n if shuffle:\n used_data = random.sample(data, len(data))\n else:\n used_data = np.copy(data)\n\n batch_size = batch_size_origin\n num_data = len(used_data)\n\n global steps_per_epoch\n steps_per_epoch = num_data // batch_size\n\n for i in range(steps_per_epoch):\n start = i * batch_size\n end = (i + 1) * batch_size\n\n batch_data = used_data[start:end]\n input_data_rec, input_data_rep, labels = zip(*batch_data)\n\n input_data_rec = torch.tensor(input_data_rec, dtype=torch.long)\n input_data_rep = torch.tensor(input_data_rep, dtype=torch.long)\n labels = torch.tensor(labels, dtype=torch.float32)\n\n yield input_data_rec.to(device), input_data_rep.to(device), labels.to(device)\n\ndef old_train(args, epoch, dataset, objective):\n print(\"------------------------------------------\")\n gen = data_generator(args, dataset['train'], args.batch_size, shuffle=True) # generate train data\n\n t1 = time.time()\n epoch_loss = []\n epoch_acc = []\n\n for idx, (input_data_rec, input_data_rep, labels) in enumerate(gen):\n # Forward and backward.\n optimizer.zero_grad()\n\n pred = model(input_data_rec, input_data_rep)\n if args.attn == 1 or args.attn == 3:\n pred = pred[0]\n loss = objective(pred, labels)\n\n loss.backward()\n optimizer.step()\n\n loss = loss.data.cpu().item()\n acc = (nn.Sigmoid()(pred).round() == labels).float().cpu().tolist()\n epoch_loss.append(loss)\n epoch_acc.append(acc)\n\n Iter = 100.0 * (idx + 1) / steps_per_epoch\n stdout.write(\"\\rEpoch: {}/{}, Iter: {:.1f}%, Loss: {:.4f}, Acc: {:.4f}\".format(epoch,\n args.epochs,\n Iter,\n np.mean(epoch_loss),\n np.mean(epoch_acc)))\n if (idx + 1) % args.print_iter == 0 :\n print(\" \")\n\n print(\"\\n> Spends {:.2f} seconds.\".format(time.time() - t1))\n print(\"> The Training dataset Accuracy is {:.2f}%, Loss is {:.4f}\".format(np.mean(epoch_acc)*100,\n np.mean(epoch_loss)))\n\ndef trainInit(args):\n max_recall = 0\n word2idx, vectors = create_model(args)\n idx2word = {b:a for a,b in word2idx.items()}\n\n if args.model_load != None:\n print(\"> Loading trained model and Train\")\n max_recall = load_model(args.model_load)\n\n dataset = load_data(args, word2idx, vectors)\n objective = nn.BCEWithLogitsLoss(pos_weight=torch.FloatTensor([RATE])).to(device)\n\n return dataset, objective, word2idx, max_recall\n\ndef trainIter(args):\n max_recall = 0\n dataset, objective, word2idx, max_recall = trainInit(args)\n print(max_recall)\n for epoch in range(args.epochs):\n # if (epoch+1)%2 == 0:\n # dataset = load_data(args, word2idx)\n\n old_train(args, epoch+1, dataset, objective)\n with torch.no_grad():\n model.eval()\n score10, score5 = 
calculateRecall(dataset)\n model.train(True)\n\n print(f\"> Validation Recall10: {score10} and Recall5: {score5}\")\n\n if score10 > max_recall:\n max_recall = score10\n save_model(args, epoch, max_recall)\n\ndef create_model(args):\n print(\"> Create model.\")\n\n with open(os.path.join(args.data_path, \"dict&vectors.pkl\"), \"rb\") as f:\n [word2idx, vectors] = pickle.load(f)\n\n global model\n if args.attn == 1:\n hidden = args.hidden_size\n encoder1 = models.Encoder(hidden_size=hidden, nlayers=1)\n encoder2 = models.Encoder(input_size=hidden*2*4, hidden_size=hidden, nlayers=1)\n\n attention_dim = 128\n attention = models.Attention(attention_dim, attention_dim, attention_dim)\n\n model = models.Classifier(encoder1, encoder2, attention,\n hidden_size=hidden,\n rec_len=rec_len,\n rep_len=rep_len,\n num_of_words=len(word2idx),\n drop_p=args.drop_p)\n\n elif args.attn == 2:\n model = models.BiDAF(window_size=args.max_length,\n hidden_size=args.hidden_size,\n drop_p=args.drop_p,\n num_of_words=len(word2idx)\n )\n elif args.attn == 3:\n model = models.RNNatt(window_size=args.max_length,\n hidden_size=args.hidden_size,\n drop_p=args.drop_p,\n num_of_words=len(word2idx),\n rec_len=rec_len,\n rep_len=rep_len\n )\n elif args.attn == 4:\n model = models.RNNatt_weight(window_size=args.max_length,\n hidden_size=args.hidden_size,\n drop_p=args.drop_p,\n num_of_words=len(word2idx),\n rec_len=rec_len,\n rep_len=rep_len\n )\n else: # args.attn == 0\n model = models.RNNbase(window_size=args.max_length,\n hidden_size=args.hidden_size,\n drop_p=args.drop_p,\n num_of_words=len(word2idx)\n )\n\n\n model.word_embedding.load_state_dict({'weight': vectors.to(torch.float32)})\n model.word_embedding.weight.requires_grad = False\n\n model = model.to(device)\n print(model)\n\n global optimizer\n optimizer = optim.Adam(model.parameters(),\n lr=args.lr_rate) # , betas=(0.9, 0.999), weight_decay=1e-3)\n\n return word2idx, vectors\n\ndef save_model(args, epoch, max_recall):\n print(\">> Saving Model...\")\n torch.save({\n 'epoch': epoch,\n 'model': model.state_dict(),\n 'opt': optimizer.state_dict(),\n 'max_recall': max_recall\n }, args.model_dump)\n\ndef load_model(ckptname):\n print(\"> Loading..\")\n ckpt = torch.load(ckptname)\n model.load_state_dict(ckpt['model'])\n optimizer.load_state_dict(ckpt['opt'])\n return ckpt['max_recall']\n\ndef testAll(args):\n word2idx, vectors = create_model(args)\n global idx2word\n idx2word = {b:a for a,b in word2idx.items()}\n print(\"> Loading trained model and Test\")\n max_recall = load_model(args.model_dump)\n print(f\"max_recall: {max_recall}\")\n test_data = load_test_data(args, word2idx)\n with torch.no_grad():\n model.eval()\n do_predict(args, test_data, idx2word)\n\n\ndef do_predict(args, test_data, idx2word):\n write = []\n for cou, data in enumerate(test_data):\n input_data_rec, input_data_rep = zip(*data)\n\n input_data_rec = torch.tensor(input_data_rec, dtype=torch.long)\n input_data_rep = torch.tensor(input_data_rep, dtype=torch.long)\n\n input_data_rec = input_data_rec.to(device)\n input_data_rep = input_data_rep.to(device)\n\n pred = model(input_data_rec, input_data_rep)\n\n\n if args.attn == 1 or args.attn == 3:\n pred = pred[0]\n pred1 = nn.Sigmoid()(pred)\n out = pred.detach().cpu().numpy().argsort()[::-1].tolist()[:10]\n out1 = pred1.detach().cpu().numpy().argsort()[::-1].tolist()[:10]\n\n out = \"\".join([\"1-\" if i in out else \"0-\" for i in range(100)])\n write.append((cou+9000001, out))\n\n df = pd.DataFrame(write, columns=['Id', 'Predict'])\n 
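# note: each 'Predict' value is 100 dash-separated 0/1 flags, one per\n # candidate reply, with 1 marking the model's top-10 choices\n 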
df.to_csv(args.output_csv, index=None)\n\ndef main(args):\n if args.train:\n trainIter(args)\n testAll(args)\n\nif __name__ == '__main__':\n print(device)\n print(rec_len, rep_len)\n parser = argparse.ArgumentParser()\n parser.add_argument('-dp', '--data_path', type=str, default='./data')\n parser.add_argument('-e', '--epochs', type=int, default=30)\n parser.add_argument('-b', '--batch_size', type=int, default=100)\n parser.add_argument('-hn', '--hidden_size', type=int, default=128)\n parser.add_argument('-lr', '--lr_rate', type=float, default=1e-4)\n parser.add_argument('-dr', '--drop_p', type=float, default=0.2)\n parser.add_argument('-md', '--model_dump', type=str, default='./model.tar')\n parser.add_argument('-ml', '--model_load', type=str, default=None, help='Print every p iterations')\n parser.add_argument('-o', '--output_csv', type=str, default='./output.csv')\n parser.add_argument('-p', '--print_iter', type=int, default=1e3, help='Print every p iterations')\n parser.add_argument('-s', '--save_iter', type=int, default=30, help='Save every p iterations')\n parser.add_argument('--max_length', type=int, default=128, help='Max dialogue length')\n parser.add_argument('-tr', '--train', type=int, default=1, help='Train and test: 1, Only test: 0')\n parser.add_argument('-atn', '--attn', type=int, default=1, help='Attn RNN: 1, RNN: 0')\n args = parser.parse_args()\n main(args)","sub_path":"hw1/b05902002/train/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"374876338","text":"def closets(game, to, targets, order=False):\r\n if order:\r\n targets.sort(key=to.distance)\r\n return targets\r\n else:\r\n min_target = None\r\n for target in targets:\r\n if not min_target or to.distance(target) < to.distance(min_target):\r\n min_target = target\r\n return min_target\r\n\r\n\r\ndef attack_pirate(game, pirate):\r\n \"\"\"\r\n return:\r\n if the pirate attacked\r\n \"\"\"\r\n enemies = game.get_enemy_living_aircrafts()\r\n for i in range(len(enemies)):\r\n if (pirate.in_attack_range(enemies[i])):\r\n game.debug('attack pirate')\r\n game.attack(pirate, enemies[i])\r\n return True\r\n return False\r\n\r\n\r\ndef clone(game, pirate):\r\n # Check if the player can decoy a pirate\r\n if pirate.owner.turns_to_decoy_reload == 0:\r\n # Whoosh\r\n game.decoy(pirate)\r\n # print a message\r\n game.debug('pirate ' + str(pirate) + ' decoys itself')\r\n # Did decoy\r\n return True\r\n\r\n # Didnt decoy\r\n return False\r\n\r\n\r\ndef move_to(game, aircraft, target, dir_number=-1):\r\n directions = game.get_sail_options(aircraft, target)\r\n game.set_sail(aircraft, directions[dir_number])\r\n \r\n \r\ndef move_through(game, aircraft, target, through):\r\n current_target = target\r\n through_location = through.get_location()\r\n \r\n if aircraft.get_location().row != target.get_location().row and \\\r\n aircraft.get_location().col != target.get_location().col:\r\n current_target = through\r\n \r\n game.debug(current_target) \r\n move_to(game, aircraft, current_target)","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"452737247","text":"import scipy.misc as misc\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.cm as cm\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser(prog=\"It's a cool job\", usage=\"I can use 
it by this\", description=\"description\")\nparser.add_argument(\"--folder\", type=str, default=\"./visualize_results\", help=\"help content\")\n\nargs = parser.parse_args()\nfile_dir = args.folder\n\ndef read_folder():\n inp_folder = []\n gt_folder = []\n pred_folder = []\n for item in os.listdir(file_dir):\n if item.split(\"_\")[0] == \"inp\":\n inp_folder.append(item)\n elif item.split(\"_\")[0] == \"gt\":\n gt_folder.append(item)\n else:\n pred_folder.append(item)\n image_list = map(sorted, [inp_folder, gt_folder, pred_folder])\n for item in zip(*image_list):\n draw_heatmap(item[1], item[0], item[2])\n\n\ndef draw_heatmap(inp, gt, pred):\n inp_file = os.path.join(file_dir, inp)\n gt_file = os.path.join(file_dir, gt)\n pred_file = os.path.join(file_dir, pred)\n gt = misc.imread(inp_file)\n inp = misc.imread(gt_file)\n pred = misc.imread(pred_file)\n plt.figure(1)\n plt.subplot(131)\n plt.title(\"inp image\")\n plt.imshow(inp)\n plt.subplot(132)\n plt.title(\"gt\")\n plt.imshow(gt, cmap=cm.Paired, vmin=0, vmax=151)\n plt.axis(\"off\")\n plt.subplot(133)\n plt.title(\"pred\")\n plt.imshow(pred, cmap=cm.Paired, vmin=0, vmax=151)\n plt.axis(\"off\")\n file_id = os.path.basename(inp_file).split(\".\")[0].split(\"_\")[1]\n plt.savefig(args.folder+\"/heatmap_\"+file_id+\".png\")\n\n\nif __name__ == \"__main__\":\n read_folder()\n","sub_path":"Image_cmaped.py","file_name":"Image_cmaped.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"217872118","text":"#! python3\nfrom abc import ABCMeta, abstractproperty\n\n\nclass Humano(metaclass=ABCMeta):\n # atributo de classe\n especie = 'Homo Sapiens'\n\n def __init__(self, nome):\n self.nome = nome\n self._idade = None\n\n @abstractproperty\n def inteligente(self):\n pass\n\n @property\n def idade(self):\n return self._idade\n\n @idade.setter\n def idade(self, idade):\n if idade < 0:\n raise ValueError('Idade deve ser um número positivo!')\n self._idade = idade\n\n def das_cavernas(self):\n self.especie = 'Homo Neanderthalensis'\n return self\n\n @staticmethod\n def especies():\n adjetivos = ('Habilis', 'Erectus', 'Neanderthalensis', 'Sapiens')\n return ('Austrolopiteco', ) + tuple(f'Homo {adj}' for adj in adjetivos)\n\n @classmethod\n def is_evoluido(cls):\n return cls.especie == cls.especies()[-1]\n\n\nclass Neanderthal(Humano):\n especie = Humano.especies()[-2]\n\n @property\n def inteligente(self):\n return False\n\n\nclass HomoSpaiens(Humano):\n especie = Humano.especies()[-1]\n\n @property\n def inteligente(self):\n return True\n\n\nif __name__ == '__main__':\n try:\n anonimo = Humano('Jhon Doe')\n print(anonimo.inteligente)\n except TypeError:\n print('Classe abstrata')\n\n jose = HomoSpaiens('Jose')\n print('{} da Classe {}, inteligente: {}'.format(jose.nome, jose.__class__.__name__, jose.inteligente))\n\n grogn = Neanderthal('Grogn')\n print(f'{grogn.nome} da Classe {grogn.__class__.__name__}, inteligente {grogn.inteligente}')","sub_path":"Curso_Python_3_UDEMY/POO_Avancado/evolucao_v6.py","file_name":"evolucao_v6.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"279220311","text":"import math\n\n#sjh\ndef input_ip():\n\t\n\t#get the ip from the user\n\tprint('Enter IP and CIDR ')\n\tip_string = input(\"(ex:'142.42.0.1/25) : \")\n\t\n\t#split up the string into the components\n\toct_1 , oct_2 , oct_3 , oct_4andcidr = ip_string.split(\".\")\n\toct_4 
, cidr = oct_4andcidr.split(\"/\")\n\t\n\t#put the strings into an array\n\tip_address = [ oct_1 , oct_2 , oct_3 , oct_4 , cidr ]\n\t\n\t#convert the strings to ints\n\tfor i in range(0,5):\n\t\tip_address[i] = int(ip_address[i])\n\t\n\t#return the array\n\treturn ip_address\n\ndef calculate(ip_address):\n\t\n\t#netmask\n\t#initialize netmask array\n\tnetmask = [0,0,0,0]\n\t\n\t#get how many octets are set to 255 by the cidr\n\tfull_octets = int(ip_address[4]/8)\n\t#set the octets to 255\n\tfor i in range(0,full_octets):\n\t\tnetmask[i] = 255\n\t\n\t#get the remaining octet that wasn't filled\n\tremainder_octet = ip_address[4]%8\n\t#do the remaining octet only if there is one\n\tif full_octets != 4:\n\t\tnetmask[full_octets] = 0\n\t\tfor i in range(0,remainder_octet):\n\t\t\t#add powers of two for each bit of remainder octet\n\t\t\tnetmask[full_octets] += 2**(7-i)\n\t\n\tprint(\"netmask:\")\n\tprint(netmask)\n\t\n\tnetwork_id = [0,0,0,0]\n\tfor i in range(0,4):\n\t\tnetwork_id[i] = ip_address[i] & netmask[i]\n\tprint(\"network_id:\")\n\tprint(network_id)\n\t\n\tbroadcast_id = [0,0,0,0]\n\tfor i in range(0,4):\n\t\t#invert the mask to get the host bits\n\t\tbroadcast_id[i] = network_id[i] | (255 - netmask[i])\n\tprint(\"broadcast_id:\")\n\tprint(broadcast_id)\n\n#to test it\ncalculate(input_ip())\n\n\n#def hostrange(ip_address):\n\t\n","sub_path":"pythonip2.py","file_name":"pythonip2.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"94430678","text":"class Node:\n    def __init__(self,l,d,r):\n        self.left = l\n        self.right = r\n        self.id = d\n\nn0 = Node(None,0,None)\nn1 = Node(None,1,None)\nn2 = Node(None,2,None)\nn3 = Node(None,3,None)\nn4 = Node(None,4,None)\nn5 = Node(None,5,None)\nn6 = Node(None,6,None)\nn7 = Node(None,7,None)\nn8 = Node(None,8,None)\nn9 = Node(None,9,None)\nn10 = Node(None,10,None)\nn0.left = n1\nn0.right = n2\nn1.left = n3\nn1.right = n4\nn2.left = n5\nn5.left = n7\nn7.left = n8\nn2.right = n6\nn6.left = n9\nn6.right = n10\n\ndef commonParent(root, id1, id2):\n    def find_path(n,id,path):\n        if n is None: return False\n        path.append(n)\n        if n.id == id:\n            return True\n        if find_path(n.left,id,path):\n            return True\n        elif find_path(n.right,id,path):\n            return True\n        else:\n            path.pop()\n            return False\n    path1 = []\n    path2 = []\n    if find_path(root, id1, path1) and find_path(root, id2, path2):\n        prev = root\n        for i in range(min(len(path1),len(path2))):\n            if path1[i] != path2[i]:\n                return prev\n            prev = path1[i]\n        # one id is an ancestor of the other: the shorter path ends at the LCA\n        if len(path1) < len(path2):\n            return path1.pop()\n        else:\n            return path2.pop()\n    return None\n    \nif __name__ == '__main__':\n    print(0 == commonParent(n0, 0, 0).id)\n    print(0 == commonParent(n0, 1, 2).id)\n    print(0 == commonParent(n0, 4, 2).id)\n    print(1 == commonParent(n0, 3, 4).id)\n    print(0 == commonParent(n0, 8, 4).id)\n    print(7 == commonParent(n0, 7, 8).id)\n    print(2 == commonParent(n0, 8, 10).id)\n    print(0 == commonParent(n0, 0, 0).id)\n","sub_path":"infectmac/btree/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"172340689","text":"from selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport 
selenium.common.exceptions\nimport json\nimport csv\nimport time\nimport re\nimport numpy as np\nimport pymysql.cursors\nfrom functools import reduce\nimport traceback\n\ndef str2int(s):\n def fn(x,y):\n return x*10+y\n def char2num(s):\n return {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9}[s]\n return reduce(fn,map(char2num,s))\nclass JdSpider():\n def open_browser(self):\n self.browser = webdriver.Chrome()\n self.browser.implicitly_wait(10)\n self.wait = WebDriverWait(self.browser,10)\n\n def init_variable(self):\n \n self.isLast = False\n self.beginpage=''\n self.endpage=''\n def parse_shop(self,link):\n js='window.open'+\"('\"+link+\"')\"\n self.browser.execute_script(js)#打开新窗口\n self.browser.switch_to.window(self.browser.window_handles[-1])#切换到新的标签页\n\n\n idlist=re.findall(r'\\b\\d+\\b',link)\n id=str(idlist[0])+'/'+str(idlist[1])#根据链接提取datas后的id\n\n print('正在爬取ID为'+id+'商品的数据')\n\n name=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//h1[@class=\"fl\"]')))#爬名称\n name=[item.text for item in name]\n name=name[0]\n newprice={}\n try:\n price=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//span[@class=\"price-num\"]')))\n price=[item.get_attribute('innerText') for item in price]#忽略不可见,爬全部价格\n price=[item for item in price]\n if(price[0]=='免费'):#免费的商品一般会有次数限制\n num=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//span[@class=\"mianfei-tag\"]')))\n num=[item.text for item in num]\n newprice={price[0]:num[0]}\n else:\n num=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[contains(@class,\"price-times one-item\")]')))\n num=[item.text for item in num]\n newnum=[]\n for item in num:#爬多少钱可以买多少\n if(re.search(r'\\d',item)):\n newnum.append(re.search(r\"\\d+\\.?\\d*\",str(item)).group())#不看后面的约等于,只取前面的\n if(\"万\" in item):\n newnum[-1]+='0000'#把汉字万换成10000乘上去\n else:\n newnum.append(item)#不含数字,/周/月这样的\n num=newnum\n index=0\n for item in price:\n newprice[str(re.search(r'\\d+',item).group())]=str(num[index])\n index-=-1\n except selenium.common.exceptions.TimeoutException:#遇到特殊商品了,此时商品规格就是价格\n num=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[contains(@class,\"price-times one-item\")]')))\n num=[item.text for item in num]\n newnum=[]\n \n for item in num:#爬多少钱可以买多少\n if(re.search(r'\\d',item)):\n newnum.append(re.search(r\"\\d+\\.?\\d*\",str(item)).group())#不看后面的约等于,只取前面的\n if(\"万\" in item):\n newnum[-1]+='0000'#把汉字万换成10000乘上去\n else:\n newnum.append(item)#不含数字,/周/月这样的\n num=newnum\n for item in newnum:\n newprice[item]=str(item)\n except IndexError:#遇到数组越界 直接关闭页面下一个\n self.browser.close()\n self.browser.switch_to.window(self.browser.window_handles[0])\n\n \n \n jprice=json.dumps(newprice)#将dic转化为json格式的数据\n company=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//span[@class=\"blue\"]')))\n company=[item.text for item in company]\n company=company[0]\n view=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//ul[@class=\"operation-list\"]/li[1]')))\n view=[item.text for item in view]\n #views.append(re.search(r'\\d+',view[0]).group())#爬浏览\n view=str2int(re.search(r'\\d+',view[0]).group())\n buy=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//ul[@class=\"operation-list\"]/li[2]')))\n buy=[item.text for item in buy]\n #buys.append(re.search(r'\\d+',buy[0]).group())#爬购买次数\n buy=str2int(re.search(r'\\d+',buy[0]).group())\n collection=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[@id=\"add-fav\"]/a/li')))\n 
collection=[item.text for item in collection]\n #collections.append(re.search(r'\\d+',collection[0]).group())#爬收藏\n collection=str2int(re.search(r'\\d+',collection[0]).group())\n tag=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//ul[@class=\"info-list cl\"]/li[2]')))\n tag=[item.text for item in tag]\n tag=tag[0]\n flag=0;\n newword=''\n newtag=[]\n\n for word in tag:\n if(flag==1):\n if(word!='\\u2003'):\n newword=newword+word\n else:\n flag=0\n newtag.append(newword)\n newword=''\n if(flag==0):\n if(word=='\\n'or word==' '):\n flag=1;\n\n newtag.append(newword)#将tag储存在list中\n jtag=json.dumps(newtag,ensure_ascii=False)#list转json\n shop=[link,name,jprice,company,view,buy,collection,jtag]\n if(SQLOS.InsertShop(shop)==0):\n self.browser.close()\n self.browser.switch_to.window(self.browser.window_handles[0])\n return 1\n \n ishistorylast=0\n try:\n self.wait.until(EC.element_to_be_clickable((By.XPATH,'//li[@id=\"detailTab6\"]'))).click()\n\n except selenium.common.exceptions.NoSuchElementException:\n print('id为'+id+'的商品无购买记录')\n ishistorylast=1\n except selenium.common.exceptions.TimeoutException:\n print('id为'+id+'的商品无购买记录')\n self.browser.close()\n self.browser.switch_to.window(self.browser.window_handles[0])\n return 0\n customers=[]\n dates=[]\n types=[]\n newid=[]\n count=0\n while (ishistorylast!=1):\n\n try:\n time.sleep(1)\n customer=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[@class=\"order-lst script-order-list\"]/table/tbody/tr/td[1]')))\n customer=[item.text for item in customer]\n date=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[@class=\"order-lst script-order-list\"]/table/tbody/tr/td[2]')))\n date=[item.text for item in date]\n typess=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[@class=\"order-lst script-order-list\"]/table/tbody/tr/td[3]')))\n typess=[item.text for item in typess]\n customers+=customer\n dates+=date\n types+=typess\n count-=-1\n print(' 正在爬取 '+name+' 的第 '+str(count)+' 页订单记录')\n for i in range(0,len(customer)):\n newid.append(id)\n\n\n except selenium.common.exceptions.NoSuchElementException:\n ishistorylast=1\n except selenium.common.exceptions.TimeoutException:\n ishistorylast=1\n except selenium.common.exceptions.StaleElementReferenceException:\n self.wait.until(EC.element_to_be_clickable((By.XPATH,'//li[@id=\"detailTab6\"]'))).click()\n\n try:\n time.sleep(1)\n self.wait.until(EC.element_to_be_clickable((By.XPATH,'//a[@title=\"下一页\"]'))).click()\n except selenium.common.exceptions.NoSuchElementException:\n print(1)\n ishistorylast=1\n except selenium.common.exceptions.TimeoutException:\n print(2)\n ishistorylast=1\n history=zip(newid,customers,dates,types)\n try:\n SQLOS.InsertHistory(list(history))\n except IndexError:#遇到数组越界 直接关闭页面下一个\n self.browser.close()\n self.browser.switch_to.window(self.browser.window_handles[0])\n\n self.browser.close()\n self.browser.switch_to.window(self.browser.window_handles[0])\n\n def parse_page(self):\n try:\n time.sleep(1)\n skus = self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//li[@class=\"boder_v1\"]')))\n skus = [item.find_element_by_css_selector('a').get_attribute('href') for item in skus]\n links = skus#首页商品链接\n for link in links:\n self.parse_shop(link)\n \n except selenium.common.exceptions.TimeoutException:\n print('parse_page: TimeoutException')\n self.parse_page()\n except selenium.common.exceptions.StaleElementReferenceException:\n print('parse_page: StaleElementReferenceException')\n 
self.browser.refresh()\n\n    def turn_page(self):\n        try:\n            next_btn = self.wait.until(EC.element_to_be_clickable((By.XPATH,'//a[@title=\"下一页\"]')))  # '下一页' is the site's \"next page\" button\n            self.browser.execute_script(\"arguments[0].click();\", next_btn)\n            time.sleep(1)\n            self.browser.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n            time.sleep(2)\n        except selenium.common.exceptions.NoSuchElementException:\n            self.isLast = True\n        except selenium.common.exceptions.TimeoutException:\n            print('turn_page: TimeoutException')\n            self.isLast = True\n        except selenium.common.exceptions.StaleElementReferenceException:\n            print('turn_page: StaleElementReferenceException')\n            self.browser.refresh()\n\n    def close_browser(self):\n        self.browser.quit()\n\n    def crawl(self):\n        self.open_browser()\n        self.init_variable()\n        print('Enter the start page number')\n        self.beginpage = input()\n        print('Enter the end page number')\n        self.endpage = input()\n        print('Start crawling')\n        self.browser.get('https://wx.jdcloud.com/api/2_0/'+str(self.beginpage))\n        time.sleep(1)\n        self.browser.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n        time.sleep(2)\n        count = str2int(self.beginpage)\n        # endpage comes from input() as a string, so convert it before comparing\n        while not (count == str2int(self.endpage) or self.isLast):\n            count += 1\n            print('Crawling page ' + str(count) + ' ......')\n            self.parse_page()\n            self.turn_page()\n        self.close_browser()\n        print('Crawling finished')\n\nclass SQLOS():\n    def __init__(self):\n        pass\n    \n    def Connect_to_DB():\n        connection = pymysql.connect(host='rm-bp10wr08s7nl319dcyo.mysql.rds.aliyuncs.com',\n                                     user='jdwx_user',\n                                     password='5^5*RUhD0QyQopX6',\n                                     db='jdwxasset',\n                                     charset='utf8mb4',\n                                     cursorclass=pymysql.cursors.DictCursor)\n        return connection  # connect to the server and return a connection\n    def InsertShop(shop):\n        db = SQLOS.Connect_to_DB()\n        cursor = db.cursor()\n        if cursor.execute(\"SELECT * from d_shoppinglist WHERE `link`=%s\",shop[0]):  # a record for this item already exists\n            db.close()\n            print('Shop with link ' + shop[0] + ' already has a record')\n            return 0\n        else:\n            try:\n                cursor.execute(\"INSERT INTO d_shoppinglist (`link`,`name`,`price`,`company`,`view`,`buy`,`collection`,`tag`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)\",(shop[0],shop[1],shop[2],shop[3],shop[4],shop[5],shop[6],shop[7]))\n                print(\"INSERT INTO d_shoppinglist (`link`,`name`,`price`,`company`,`view`,`buy`,`collection`,`tag`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)\",(shop[0],shop[1],shop[2],shop[3],shop[4],shop[5],shop[6],shop[7]))\n\n                db.commit()\n                db.close()\n                return 1\n            except Exception as e:\n                print(str(e))\n                traceback.print_exc()\n\n                print(1)\n                db.rollback()\n                db.close()\n                return 0\n    def InsertHistory(history):\n        db = SQLOS.Connect_to_DB()\n        cursor = db.cursor()\n        if cursor.execute(\"SELECT * from d_historylist WHERE `shopid`=%s\",history[0][0]):  # a record for this item already exists\n            db.close()\n            print('Shop with ID ' + history[0][0] + ' already has a record')\n            return 0\n        else:\n            try:\n                for item in history:\n                    print(\"INSERT INTO d_historylist(`shopid`,`customer`,`dates`,`type`) VALUES(%s,%s,%s,%s)\",(item[0],item[1],item[2],item[3]))\n                    cursor.execute(\"INSERT INTO d_historylist(`shopid`,`customer`,`dates`,`type`) VALUES(%s,%s,%s,%s)\",(item[0],item[1],item[2],item[3]))\n\n                db.commit()\n                db.close()\n                return 1\n            except Exception as e:\n                print(str(e))\n                traceback.print_exc()\n\n                print(2)\n                db.rollback()\n                db.close()\n                return 0\n    \nif __name__ == '__main__':\n    spider = JdSpider()\n    spider.crawl()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"606046766","text":"__author__ = 
'Guillermo Avendano-Franco'\n\nimport os\nimport numpy as np\n\n\nclass VaspOutput():\n\n def __init__(self, filename='OUTCAR'):\n\n self.magnetization = None\n self.total_charge = None\n self.free_energy = None\n self.forces = None\n self.stress = None\n\n if not os.path.isfile(filename):\n raise ValueError('File not found '+filename)\n\n rf = open(filename)\n lines = rf.readlines()\n\n for i in range(len(lines)):\n if lines[i].strip() == 'magnetization (x)':\n self.get_magnetization(i, lines)\n elif lines[i].strip() == 'total charge':\n self.get_total_charge(i, lines)\n elif lines[i].strip() == 'FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)':\n self.get_free_energy(i, lines)\n elif lines[i].strip() == 'FORCE on cell =-STRESS in cart. coord. units (eV):':\n if lines[i+1].strip().startswith('Direction') and lines[i+2].strip().startswith('---'):\n self.get_stress(i, lines)\n elif lines[i].strip() == 'POSITION TOTAL-FORCE (eV/Angst)':\n self.get_forces(i, lines)\n\n def get_magnetization(self, iline, lines):\n keys = [x.strip() for x in lines[iline+2].split(5*' ')]\n i = 3\n self.magnetization = {}\n for k in keys:\n self.magnetization[k] = []\n while True:\n i += 1\n if lines[iline+i].strip().startswith('---'):\n break\n values = [float(x) for x in lines[iline + i].split()]\n assert(len(keys) == len(values))\n for ik in range(len(keys)):\n self.magnetization[keys[ik]].append(values[ik])\n\n def get_total_charge(self, iline, lines):\n keys = [x.strip() for x in lines[iline+2].split(5*' ')]\n i = 3\n self.total_charge = {}\n for k in keys:\n self.total_charge[k] = []\n while True:\n i += 1\n if lines[iline+i].strip().startswith('---'):\n break\n values = lines[iline + i].split()\n assert(len(keys) == len(values))\n for ik in range(len(keys)):\n self.total_charge[keys[ik]].append(values[ik])\n\n def get_free_energy(self, iline, lines):\n self.free_energy = float(lines[iline+2].split()[4])\n\n def get_forces(self, iline, lines):\n i = 1\n self.forces = []\n while True:\n i += 1\n if lines[iline+i].strip() == 83*'-':\n break\n values = lines[iline + i].split()\n self.forces.append([float(x) for x in values[-3:]])\n if not self.forces:\n self.forces = None\n\n def get_stress(self, iline, lines):\n i = 2\n self.stress = {}\n\n while True:\n i += 1\n if lines[iline+i].strip().startswith('---'):\n break\n key = lines[iline + i][:9].strip()\n #print lines[iline + i][9:]\n #print lines[iline+i]\n values = [float(x) for x in lines[iline + i][9:].split()]\n #print values\n self.stress[key] = values\n i += 1\n values = [float(x) for x in lines[iline + i][9:].split()]\n self.stress['Total'] = values\n if self.stress == {}:\n self.stress = None\n\n def __str__(self):\n ret = '\\nForces:\\n'\n index = 0\n for iforce in self.forces:\n index += 1\n ret += \"%3d %9.6f %9.6f %9.6f\\n\" % (index, iforce[0], iforce[1], iforce[2])\n ret += '\\nStress:\\n'\n for istress in sorted(self.stress):\n if istress != 'Total':\n ret += '%8s ' % istress\n for i in self.stress[istress]:\n ret += ' %12.6f' % i\n ret += '\\n'\n ret += '%8s ' % 'Total'\n for i in self.stress['Total']:\n ret += ' %12.6f' % i\n ret += '\\n'\n\n ret += '\\nFree Energy:\\n'\n ret += str(self.free_energy)\n return ret\n\n def relaxation_info(self):\n\n info = {}\n if self.stress is not None:\n info['avg_stress_diag'] = np.average(np.abs(self.stress['Total'][:3]))\n info['avg_stress_non_diag'] = np.average(np.abs(self.stress['Total'][-3:]))\n if self.forces is not None:\n info['avg_force'] = np.average(np.abs(np.apply_along_axis(np.linalg.norm, 1, 
self.forces)))\n return info\n\n def to_dict(self):\n ret = {}\n for i in ['magnetization', 'total_charge', 'free_energy', 'forces', 'stress']:\n ret[i] = eval('self.'+i)\n return ret\n","sub_path":"pychemia/code/vasp/_outcar.py","file_name":"_outcar.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"602847648","text":"\"\"\"Module that handles basic level bot commands\"\"\"\n\nimport discord\nfrom discord.ext import commands\nimport emoji\nimport global_vars\nimport database\n\nclass BasicCommands(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n# START COMMANDS\n# ----------------------\n\n # Commands cannot start with the terms cog_ or bot_\n @commands.command(name='botinfo')\n async def staticbot_info(self, ctx):\n \"\"\"\n Posts some information about the bot\n \"\"\"\n embed = discord.Embed(title='Information about staticbot', type='rich', timestamp=ctx.message.created_at, color = global_vars.EMBED_COL)\n embed.add_field(name=':sunglasses: Author', value='static#3628', inline=True)\n embed.add_field(name=':floppy_disk: GitHub', value='[Link](https://github.com/cjrose/discord-bot/)', inline=True)\n embed.add_field(name=':bird: Twitter', value='[Link](https://twitter.com/codyjrose/)', inline=True)\n embed.add_field(name=':book: Language', value='Python 3.7', inline=True)\n embed.add_field(name=':snake: Wrapper', value='discord.py', inline=True)\n embed.add_field(name=':robot: Version', value=global_vars.VERSION, inline=True)\n await ctx.send(content=None, embed=embed)\n \n @commands.command(name='guildinfo')\n async def guild_info(self, ctx):\n \"\"\"\n Posts some information about the guild\n \"\"\"\n emotes = '**Emotes:**\\n'\n for emote in ctx.guild.emojis:\n emotes += '{0}'.format(emote)\n\n log_channel = database.get_log_channel(ctx.guild)\n if log_channel == 0:\n log_channel='Not Set'\n else:\n log_channel='<#{0}>'.format(log_channel)\n \n starboard_channel = database.get_starboard_channel(ctx.guild)\n if starboard_channel == 0:\n starboard_channel='Not Set'\n else:\n starboard_channel='<#{0}>'.format(starboard_channel)\n\n system_channel = ctx.guild.system_channel\n if system_channel is None:\n system_channel = 'Not Set'\n else:\n system_channel=system_channel.mention\n\n thumbnail_url = str(ctx.guild.icon_url_as(format='png', size=4096))\n bot_count = 0\n for member in ctx.guild.members:\n if member.bot:\n bot_count += 1\n \n embed = discord.Embed(title='Information about {0}'.format(str(ctx.guild)), type='rich', description=emotes, timestamp=ctx.message.created_at, color=global_vars.EMBED_COL)\n\n embed.add_field(name=':1234: Guild ID', value=ctx.guild.id, inline=True)\n embed.add_field(name=':sunglasses: Owner', value='{0}#{1}'.format(ctx.guild.owner.name, ctx.guild.owner.discriminator), inline=True)\n embed.add_field(name=':robot: Bot Count', value=bot_count, inline=True)\n\n embed.add_field(name=':date: Created At', value=str(ctx.guild.created_at)[:-10], inline=True)\n embed.add_field(name=':family: Member Count', value=ctx.guild.member_count, inline=True)\n embed.add_field(name=':flag_us: Region', value=ctx.guild.region, inline=True)\n\n embed.add_field(name=':gear: System Channel', value=system_channel, inline=True)\n embed.add_field(name=':tv: Logging Channel', value=log_channel, inline=True)\n embed.add_field(name=':tv: Starboard Channel', value=starboard_channel, inline=True)\n embed.set_thumbnail(url=thumbnail_url)\n\n await ctx.send(content=None, 
embed=embed)\n \n @commands.command(name='userinfo')\n async def user_info(self, ctx, user=None):\n \"\"\"\n Sends an embed of information about the specified user\n \"\"\"\n\n if len(ctx.message.mentions) == 1:\n user = ctx.message.mentions[0]\n elif user is not None:\n user = ctx.guild.get_member_named(user)\n # user not found\n if user is None:\n await ctx.send('{0} Could not find that user'.format(global_vars.X_MARK_EMOJI))\n return\n else:\n user = ctx.author\n \n thumbnail_url = str(user.avatar_url_as(format='png', size=4096))\n user_roles = user.roles\n roles = ''\n for role in user_roles:\n roles += '{0} '.format(role.mention)\n \n curr_info = database.get_guild_currency_info(ctx.guild)\n curr_emote = emoji.emojize(curr_info[0])\n bal = database.get_balance(user)\n \n embed = discord.Embed(title='Information about {0}#{1}'.format(user.name, user.discriminator), type='rich', timestamp=ctx.message.created_at, color=global_vars.EMBED_COL)\n embed.set_thumbnail(url=thumbnail_url)\n\n\n # TODO make this a little better in the future\n user_activity = 'None'\n user_activities = user.activities\n if len(user_activities) > 0:\n user_activity = ''\n for activity in user_activities:\n user_activity += '{0}, '.format(activity.name)\n user_activity = user_activity[:-2]\n \n\n embed.add_field(name=':1234: User ID', value=user.id, inline=True)\n embed.add_field(name=':date: Created On', value=str(user.created_at)[:-10], inline=True)\n embed.add_field(name=':date: Joined On', value=str(user.joined_at)[:-10], inline=True)\n\n embed.add_field(name=':crown: Roles [{0}]'.format(len(user.roles)), value=roles, inline=True)\n embed.add_field(name='{0} Balance'.format(curr_emote), value='{0} {1}'.format(bal, curr_info[1]), inline=True)\n embed.add_field(name=':information_source: Status', value=user.status, inline=True)\n\n embed.add_field(name=':iphone: On Mobile?', value=user.is_on_mobile(), inline=True)\n embed.add_field(name=':robot: Bot?', value=user.bot, inline=True)\n embed.add_field(name=':question: Doing stuff?', value=user_activity, inline=True)\n \n await ctx.send(content=None, embed=embed)\n\n# END COMMANDS\n# ----------------------\n \ndef setup(bot):\n bot.add_cog(BasicCommands(bot))\n","sub_path":"cogs/basic_commands.py","file_name":"basic_commands.py","file_ext":"py","file_size_in_byte":6008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"358518483","text":"# Plot the DDG\n\n# Read output files and extract needed values\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport numpy\nimport re\n\na = \"C:\\\\Users\\\\ITSloaner\\\\PycharmProjects\\\\Nanobodyfitness\\\\data\\\\LowNumberaa.fasta\"\nb = \"C:\\\\Users\\\\ITSloaner\\\\PycharmProjects\\\\Nanobodyfitness\\\\data\\\\HighNumberaa.fasta\"\nc = \"C:\\\\Users\\\\ITSloaner\\\\PycharmProjects\\\\Nanobodyfitness\\\\structure\\\\ddg\\\\ddg_predictions_32.out\"\nd = \"C:\\\\Users\\\\ITSloaner\\\\PycharmProjects\\\\Nanobodyfitness\\\\structure\\\\ddg\\\\ddg_predictions_47.out\"\ne = \"C:\\\\Users\\\\ITSloaner\\\\PycharmProjects\\\\Nanobodyfitness\\\\structure\\\\ddg\\\\ddg_predictions_48.out\"\nf = \"C:\\\\Users\\\\ITSloaner\\\\PycharmProjects\\\\Nanobodyfitness\\\\structure\\\\ddg\\\\ddg_predictions_49.out\"\ng = \"C:\\\\Users\\\\ITSloaner\\\\PycharmProjects\\\\Nanobodyfitness\\\\structure\\\\ddg\\\\ddg_predictions_57.out\"\nh = \"C:\\\\Users\\\\ITSloaner\\\\PycharmProjects\\\\Nanobodyfitness\\\\structure\\\\ddg\\\\ddg_predictions_58.out\"\nsavepath = 
'C:/Users/ITSloaner/PycharmProjects/Nanobodyfitness/Structure/Figures'\n\n# Get the total values\ntot32, tot47, tot48, tot49, tot57, tot58 = [], [], [], [], [], []\ntot = [tot32, tot47, tot48, tot49, tot57, tot58]\nnames = [c, d, e, f, g, h]\n\nfor a in range(len(names)):\n fields = [\n \"ddG: description\" \"total\" \"fa_atr\" \"fa_rep\" \"fa_sol\" \"fa_intra_rep\" \"fa_intra_sol_xover4\" \"lk_ball_wtd\" \"fa_elec\" \"pro_close\" \"hbond_sr_bb\" \"hbond_lr_bb\" \"hbond_bb_sc\" \"hbond_sc dslf_fa13\" \"omega fa_dun\" \"p_aa_pp\" \"yhh_planarity\" \"ref rama_prepro\"]\n fname = names[a]\n lis = pd.read_csv(fname, skiprows=0, keep_default_na=bool, names=fields)\n lia = []\n for i in range(len(lis)):\n # print(i)\n lists = lis.iloc[i]\n lits = pd.array(lists, dtype=\"string\")\n lists1 = lits[0].split(\" \", 21)\n lia.append(lists1)\n for i in range(len(lia) - 1):\n toa = tot[a]\n k = i + 1\n ddg_tot = lia[k][1]\n if \" \" in ddg_tot:\n ddg_tot = ddg_tot.replace(\" \", \"\")\n elif \" \" in ddg_tot:\n ddg_tot = ddg_tot.replace(\" \", \"\")\n toa.append(float(ddg_tot))\n\n\ndef percentnumlohi(a, b):\n # Read out the fasta file\n nam = [a, b]\n taanumlo, taanumhi = [], []\n taan = [taanumlo, taanumhi]\n\n for g in range(len(taan)):\n fname = nam[g]\n high_nb_file = open(fname) # default - r - open for reading\n high_nb = high_nb_file.read()\n high_nb_list = high_nb.splitlines()\n high_nb_file.close()\n # print(high_nb_list)\n\n aaseq = []\n taa = []\n num_line = numpy.arange(3, len(high_nb_list), 2) # Note this might change\n for position, line in enumerate(high_nb_list):\n line = line[:-1]\n # print(position, line)\n if position in num_line:\n aaseq.append(line)\n sp = line.split(\",\")\n taa.append(sp)\n # print(sp)\n # print(len(sp))\n\n # print(taa)\n # Convert to integers\n taanum = taan[g]\n for a in range(len(taa)): # Positions in aa like 32, 45\n numa = a\n # print(taanum)\n cent = []\n for b in range(len(taa[a])):\n st = int(taa[a][b])\n cent.append(st)\n # print(cent)\n taanum.append(cent)\n\n # print(taa)\n # print('taanum:', taanum)\n # print(len(taanum))\n\n # print(taanumlo)\n # print(len(taanumlo))\n aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'W', 'V', 'Y', '-']\n posaa = [32, 47, 48, 49, 57, 58, 59, 60, 124, 125, 126]\n\n # Pernumlo, pernumhi\n sumnumlo = [0] * len(taanumlo)\n sumnumhi = [0] * len(taanumlo)\n for i in range(len(taanumlo)):\n talo = taanumlo[i]\n sumnumlo[i] = sum(talo)\n tahi = taanumhi[i]\n sumnumhi[i] = sum(tahi)\n\n # Get percent\n pernumlo = []\n pernumhi = []\n for i in range(len(taanumlo)):\n arrlo, arrhi = [], []\n for z in range(len(taanumlo[0])):\n arrlo.append(taanumlo[i][z] * 100 / sumnumlo[0])\n arrhi.append(taanumhi[i][z] * 100 / sumnumhi[0])\n pernumlo.append(arrlo)\n pernumhi.append(arrhi)\n\n # print(pernumlo)\n # print(pernumhi)\n # print(len(pernumhi))\n return pernumlo, pernumhi\n\na = \"C:\\\\Users\\\\ITSloaner\\\\PycharmProjects\\\\Nanobodyfitness\\\\data\\\\LowNumberaa.fasta\"\nb = \"C:\\\\Users\\\\ITSloaner\\\\PycharmProjects\\\\Nanobodyfitness\\\\data\\\\HighNumberaa.fasta\"\n\npernumlo, pernumhi = percentnumlohi(a, b)\nprint(pernumlo, pernumhi)\n\naa = ['A', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\naa2 = ['A', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y', '-']\nlabels = ['aa 32', 'aa 47', 'aa 48', 'aa 49', 'aa 57', 'aa 58'] # Highlight the original value\naaorig = ['N', 'L', 'V', 'A', 'I', 'T']\naaono = [9, 
8, 15, 0, 6, 14]\nposaa = [32, 47, 48, 49, 57, 58, 59, 60, 124, 125, 126]\n\n# Plot\nfor i in range(len(names)):\n    n = aaono[i]\n    plt.plot(aa, tot[i])\n    plt.plot(aa[n], tot[i][n], 'rx', label='Original aa')\n    plt.legend()\n    plt.xlabel('Amino Acids')\n    plt.ylabel('ddg (REU)')  # units\n    plt.title('Plot of change in amino acids versus ddg in %s' % labels[i])\n    # file_name = ('ddg_%s.png' % labels[i])\n    # plt.savefig(os.path.join(savepath, file_name))\n    plt.show()\n\n# idea - plot individually and use the original position as a point of reference in comparing others\n# Positions 47 and 49 are important due to the high difference in energy with change in aa\n","sub_path":"ddgplot.py","file_name":"ddgplot.py","file_ext":"py","file_size_in_byte":5576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"270119170","text":"import torch\nfrom torch import nn\nfrom torch.optim import Adam\nimport torchvision as T\nimport torchvision.transforms as Transformer\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import make_grid, save_image\nimport pytorch_lightning as pl\nfrom pytorch_lightning.core.decorators import auto_move_data\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom model import *\nfrom datasets import *\nimport VGG\nfrom argparse import ArgumentParser\nimport os\n\n\nclass PLModel(pl.LightningModule):\n    def __init__(self, args=None, only_one_dataset=False):\n        super(PLModel, self).__init__()\n        if args is None:\n            parser = ArgumentParser()\n            parser = self.add_model_args(parser)\n            self.args = parser.parse_args()\n        else:\n            self.args = args\n        self.only_one_dataset = only_one_dataset\n        self.save_hyperparameters()\n        self.model = Model()\n        vgg = VGG.vgg\n        vgg.load_state_dict(torch.load(self.args.vgg_ckp_file))\n        vgg = nn.Sequential(*list(vgg.children())[:31])\n        self.net = VGG.Net(vgg)\n        self.L_loss = LaplacianRegularizer()\n        self.epoch_indx = 0\n\n    @staticmethod\n    def add_model_args(parser):\n        parser = ArgumentParser(parents=[parser], add_help=False)\n        parser.add_argument(\"--batch_size\", type=int, default=4, help=\"batch size\")\n        parser.add_argument(\"--vgg_ckp_file\", type=str, default=\"./checkpoints/vgg_normalised.pth\")\n        parser.add_argument(\"--content_path\", type=str, default=\"/mnt/DataDisk/public/dataset/COCO/2017/images/train2017\")\n        parser.add_argument(\"--style_path\", type=str, default=\"/mnt/DataDisk/public/dataset/COCO/2017/images/train2017\")\n        parser.add_argument(\"--lambda_c\", type=float, default=0.5, help=\"coeff for loss c\")\n        parser.add_argument(\"--lambda_s\", type=float, default=1, help=\"coeff for loss s\")\n        parser.add_argument(\"--lambda_r\", type=float, default=0.15, help=\"coeff for loss r\")\n        parser.add_argument(\"--num_workers\", type=int, default=6)\n        parser.add_argument(\"--img_size\", type=int, default=512)\n        parser.add_argument(\"--lr\", type=float, default=1e-4, help=\"learning rate\")\n        parser.add_argument(\"--val_content\", type=str, default=\"./images/content_test/\")\n        parser.add_argument(\"--val_style\", type=str, default=\"./images/style_test/\")\n        parser.add_argument(\"--log_training\", type=int, default=False)\n        parser.add_argument(\"--log_validation\", type=int, default=True)\n        parser.add_argument(\"--val_set_size\", type=int, default=-1)\n        parser.add_argument(\"--train_set_size\", type=int, default=-1)\n        parser.add_argument(\"--val_one_by_one\", type=int, default=False)\n        return parser\n\n    
def prepare_data(self):\n content_path = self.args.content_path\n style_path = self.args.style_path\n self.train_dataset = JBLDataset(content_path, style_path, img_size=self.args.img_size, set_size=self.args.train_set_size)\n self.val_dataset = JBLDataset(self.args.val_content, self.args.val_style, \n img_size =self.args.img_size, one_by_one=self.args.val_one_by_one, \n set_size = self.args.val_set_size,\n num_gpus = len(self.args.gpus.split(',')))\n self.train_loader = DataLoader(self.train_dataset, num_workers=self.args.num_workers,\n batch_size = self.args.batch_size*2, shuffle=True)\n self.val_loader = DataLoader(self.val_dataset, batch_size=self.args.batch_size)\n \n def val_dataloader(self):\n if not hasattr(self, 'val_loader'):\n self.prepare_data()\n return self.val_loader\n \n def train_dataloader(self):\n if not hasattr(self, 'train_loader'):\n self.prepare_data()\n return self.train_loader\n\n @auto_move_data\n def forward(self, low_cont, cont_img, style_img, low_style):\n # input image tensors\n cont_feat = self.net.encode_with_intermediate(low_cont) # VGG 前4层的feature map\n style_feat = self.net.encode_with_intermediate(low_style) \n coeffs,output = self.model(cont_img,cont_feat,style_feat)\n return coeffs,output\n\n def sample_image(self, vgg, model,batch, logname, img_idx):\n cont_img,low_cont,style_img,low_style = batch\n batch_size = cont_img.shape[0]\n model.eval()\n cont_feat = vgg.encode_with_intermediate(low_cont)\n style_feat = vgg.encode_with_intermediate(low_style)\n coeffs, output = model(cont_img, cont_feat, style_feat)\n\n cont = make_grid(cont_img, nrow=batch_size, normalize=True)\n style = make_grid(style_img, nrow=batch_size, normalize=True)\n out = make_grid(output, nrow=batch_size, normalize=True)\n\n image_grid = torch.cat((cont, style, out), 1)\n self.logger.experiment.add_image(logname, image_grid, img_idx)\n # save_image(image_grid, output_file + 'output'+str(epoch)+'.jpg', normalize=False)\n model.train()\n return image_grid\n\n def validation_step(self, batch, batch_idx):\n low_cont, cont_img, low_style, style_img = batch\n if self.args.log_validation:\n batch = [cont_img,low_cont,style_img,low_style]\n self.sample_image(self.net, self.model, batch, \"val\", self.epoch_indx)\n pass\n \n def training_step(self, batch, batch_idx):\n if self.only_one_dataset:\n low_conts, cont_imgs = batch\n bs = low_conts.shape[0]//2\n low_cont = low_conts[:bs, :].contiguous()\n cont_img = cont_imgs[:bs, :].contiguous()\n low_style = low_conts[bs:bs*2, :].contiguous()\n style_img = cont_imgs[bs:bs*2, :].contiguous()\n del low_conts, cont_imgs\n else:\n low_cont, cont_img, low_style, style_img = batch\n \n coeffs, output = self(low_cont, cont_img, style_img, low_style) \n loss_c,loss_s = self.net.loss(output,cont_img,style_img)\n loss_r = self.L_loss(coeffs)\n total_loss = self.args.lambda_c * loss_c + self.args.lambda_s * loss_s + self.args.lambda_r * loss_r\n self.log(\"train_loss\", total_loss)\n if self.args.log_training:\n sample_idx = batch_idx + self.epoch_indx * len(self.train_loader)\n if sample_idx % 30 == 0:\n batch = [cont_img,low_cont,style_img,low_style]\n self.sample_image(self.net, self.model, batch,'train', sample_idx)\n return total_loss\n\n def training_epoch_end(self, reslut_list):\n self.epoch_indx += 1\n \n def configure_optimizers(self):\n return Adam(self.model.parameters(), lr=self.args.lr)\n\nif __name__ == \"__main__\":\n # print(\"VISABLE\", os.environ['CUDA_VISIBLE_DEVICES'])\n parser = ArgumentParser()\n parser.add_argument(\"--logdir\", 
type=str, default=\"./log\")\n parser.add_argument(\"--logname\", type=str, default = \"pl\")\n parser.add_argument(\"--test\", type=int, default=False)\n parser = PLModel.add_model_args(parser)\n parser = pl.Trainer.add_argparse_args(parser)\n args = parser.parse_args()\n if args.test:\n logger = False\n # model = PLModel(args) \n model = PLModel.load_from_checkpoint(\"./log/pl_coco_f/version_4/checkpoints/last.ckpt\")\n model.freeze()\n model.to('cuda:4')\n model.args = args\n val_loader = model.val_dataloader()\n for i, batch in enumerate(val_loader):\n print(\"run {}\".format(i))\n low_cont,cont_img,low_style,style_img = batch\n coef, output = model(*batch)\n output = output.cpu()\n out = make_grid(output, normalize=True)\n cont = make_grid(cont_img, normalize=True)\n style = make_grid(style_img, normalize=True)\n # diff = make_grid(output - cont_img, normalize=False)\n # diff2 = torch.abs(diff).sum(dim=0).repeat(3, 1, 1)\n # image =torch.cat((cont, style, out, diff, diff2), 1)\n image =torch.cat((cont, style, out), 1)\n save_image(image, \"./output/fusion_{}.png\".format(i))\n print(\"saved {}\".format(i))\n else:\n logger = TensorBoardLogger(args.logdir, args.logname, flush_secs=1)\n if args.style_path == args.content_path:\n only_one_dataset = True\n else:\n only_one_dataset = False\n\n model = PLModel(args, only_one_dataset=only_one_dataset)\n ckp_set = ModelCheckpoint(save_last=True, monitor=\"train_loss\")\n trainer = pl.Trainer.from_argparse_args(args, \n checkpoint_callback=ckp_set, \n logger = logger,\n # distributed_backend = 'ddp',\n # gpus=[2,3,4,5,6]\n )\n trainer.fit(model)\n","sub_path":"main_pl.py","file_name":"main_pl.py","file_ext":"py","file_size_in_byte":8779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"623719198","text":"from setuptools import setup\nfrom os import path\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\n\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"pyramm\",\n version=\"1.1\",\n description=\"Provides a wrapper to the RAMM API and additional tools for positional referencing\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/captif-nz/pyramm',\n author=\"John Bull\",\n author_email=\"johnbullnz@gmail.com\",\n packages=[\"pyramm\"],\n install_requires=[\n \"requests\",\n \"pandas\",\n \"geopandas\",\n \"numpy\",\n \"shapely\",\n \"scipy\",\n \"pyproj\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"124356687","text":"import unittest\nimport numpy as np\nfrom grid import Grid\nfrom jugador import Jugador\nfrom tesoro import Tesoro\n\n\nclass TestGridMethods(unittest.TestCase):\n\n def test_get_grilla_vacia(self):\n size_x = 2\n size_y = 2\n grid = Grid(size_x, size_y)\n grid.generate_grilla_vacia()\n grilla = grid.get_grilla()\n expect = np.array([[0, 0], [0, 0]])\n self.assertTrue(np.array_equal(grilla, expect))\n\n def test_set_random_jugador(self):\n size_x = 2\n size_y = 2\n grid = Grid(size_x, size_y)\n grid.generate_grilla_vacia()\n jugador_new = Jugador()\n grid.set_jugador(jugador_new)\n grid.random_posicion_jugador()\n grilla = grid.get_grilla()\n self.assertEqual(np.count_nonzero(grilla == 2), 1)\n\n def test_set_random_tesoro(self):\n 
size_x = 2\n        size_y = 2\n        grid = Grid(size_x, size_y)\n        grid.generate_grilla_vacia()\n        tesoro_new = Tesoro()\n        grid.set_tesoro(tesoro_new)\n        grid.random_posicion_tesoro()\n        grilla = grid.get_grilla()\n        self.assertEqual(np.count_nonzero(grilla == 3), 1)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test_grid.py","file_name":"test_grid.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"8812864","text":"# PAGE 53\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nnfs\nfrom nnfs.datasets import spiral_data\n\nfrom libraries import Activation_Softmax_Loss_CategoricalCorssentropy\nfrom libraries import Activation_Softmax\nfrom libraries import Loss_CategoricalCrossentropy\nfrom libraries import Activation_ReLU\nfrom libraries import Layer_Dense\nfrom libraries import Optimizer_SGD_version3\nfrom libraries import Optimizer_AdaGrad\n\n\n# initialize nnfs dataset\nnnfs.init()\n\n\n# Create dataset\nx, y = spiral_data(samples=100, classes=3)\n\n# Create dense layer with 2 input features and 64 output values\ndense1 = Layer_Dense(2, 64)\n\n# Create ReLU activation (to be used with Dense Layer).\nactivation_relu = Activation_ReLU()\n\n# Create second dense layer with 64 input features (as we take the output of the previous layer here)\n# and 3 output values\ndense2 = Layer_Dense(64, 3)\n\n# Create softmax classifier's combined loss and activation\nloss_activation = Activation_Softmax_Loss_CategoricalCorssentropy()\n\n# Create optimizer: we use decay (and momentum) to help the learning rate reach the global minimum\noptimizer = Optimizer_AdaGrad(decay=1e-4)\n\n# Train in loop\nfor epoch in range(10001):\n\n    # Perform a forward pass of our training data through this layer\n    dense1.forward(x)\n\n    # perform a forward pass through activation function\n    # takes the output of first dense layer here\n    activation_relu.forward(dense1.output)\n\n\n    # Perform a forward pass through second dense layer\n    # takes output of activation function of first layer as inputs\n    dense2.forward(activation_relu.output)\n\n    # Perform a forward pass through the combined softmax activation and loss\n    # takes the output of second dense layer here and returns loss\n    loss = loss_activation.forward(dense2.output, y)\n\n    # Calculate accuracy from output of activation/loss and targets; take argmax along the first axis\n    predictions = np.argmax(loss_activation.output, axis=1)\n    if len(y.shape) == 2:\n        y = np.argmax(y, axis=1)\n\n    accuracy = np.mean(predictions == y)\n    if not epoch % 100:\n        print(f'epoch: {epoch}, ' +\n              f'acc: {accuracy:.3f}, ' +\n              f'loss: {loss:.3f}, ' +\n              f'lr: {optimizer.current_learning_rate}')\n\n    # Backward pass/ backpropagation\n    loss_activation.backward(loss_activation.output, y)\n    dense2.backward(loss_activation.dinputs)\n    activation_relu.backward(dense2.dinputs)\n    dense1.backward(activation_relu.dinputs)\n\n    # Then we finally use our optimizer to update weights and biases\n\n    # Update weights and biases\n    optimizer.pre_update_params()\n    optimizer.update_params(dense1)\n    optimizer.update_params(dense2)\n    optimizer.post_update_params()\n","sub_path":"2020/neuron_from_scratch/10_07_adgrad.py","file_name":"10_07_adgrad.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"652312341","text":"# A binary gap within a positive integer N is any maximal sequence of consecutive zeros that is\r\n# surrounded by ones at both ends in the binary 
representation of N.\r\n# For example, number 9 has binary representation 1001 and contains a binary gap of length 2.\r\n# The number 529 has binary representation 1000010001 and contains two binary gaps: one of length 4 and one of length 3. The number 20 has binary representation 10100 and contains one binary gap of length 1. The number 15 has binary representation 1111 and has no binary gaps. The number 32 has binary representation 100000 and has no binary gaps.\r\n# Write a function:\r\n# def solution(N)\r\n# that, given a positive integer N, returns the length of its longest binary gap.\r\n# The function should return 0 if N doesn't contain a binary gap.\r\n# For example, given N = 1041 the function should return 5, because N has binary representation 10000010001\r\n# and so its longest binary gap is of length 5. Given N = 32 the function should return 0, because N has binary\r\n# representation '100000' and thus no binary gaps.\r\n# Assume that:\r\n# N is an integer within the range [1..2,147,483,647].\r\n# Complexity:\r\n# expected worst-case time complexity is O(log(N));\r\n# expected worst-case space complexity is O(1).\r\n\r\ndef solution(number):\r\n    temp_gap, max_gap = 0, 0\r\n    # slice off the '0b' prefix (strip('0b') would also remove trailing zeros)\r\n    bin_number = bin(number)[2:]\r\n    for digit in bin_number:\r\n        if digit == '0':\r\n            temp_gap += 1\r\n        elif digit == '1':\r\n            if max_gap < temp_gap:\r\n                max_gap = temp_gap\r\n            temp_gap = 0\r\n    return max_gap\r\n\r\nnumber = 539\r\nnumber1 = 1041\r\nnumber2 = 32\r\n\r\nprint(solution(number))\r\nprint(solution(number1))\r\nprint(solution(number2))","sub_path":"hw8.py","file_name":"hw8.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"30558361","text":"\n\nfrom xai.brain.wordbase.nouns._self import _SELF\n\n#class header\nclass _SELVES(_SELF):\n    def __init__(self):\n        _SELF.__init__(self)\n        self.name = \"SELVES\"\n        self.specie = 'nouns'\n        self.basic = \"self\"\n        self.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_selves.py","file_name":"_selves.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"329906174","text":"\"\"\"Simple webstuffs-getting module\"\"\"\n\n__WGET_LOCATION = \"/opt/local/bin/wget\"\n\nfrom urllib2 import URLError, HTTPError\nimport re\nimport random\n\nIP_ADDRESS_PATTERN = r\"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$\"\nIP_ADDRESS_SERVERS = [\n    \"ifconfig.me\",\n    \"icanhazip.com\"\n    ]\n\ndef getIPAddress():\n    random.shuffle(IP_ADDRESS_SERVERS)\n    for source in IP_ADDRESS_SERVERS:\n        text = getHTML(source, method=\"curl\").strip()\n        if re.search(IP_ADDRESS_PATTERN, text) is not None:\n            return text\n    return None\n\ndef getHTML(url, method=\"wget\"):\n    if method == \"wget\":\n        import subprocess\n\n        p = subprocess.Popen([__WGET_LOCATION, \"--output-document=-\", \"--no-verbose\", url], \n                stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n        html, err = p.communicate()\n        if err[:5] == \"wget:\":\n            raise URLError(err)\n        return html\n\n    elif method == \"curl\":\n        import subprocess\n\n        p = subprocess.Popen([\"curl\", url], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n        html, err = p.communicate()\n        errStart = err.find(\"curl:\")\n        if errStart != -1:\n            raise URLError(err[errStart:])\n        return html\n\n    elif method == \"urllib\":\n        import urllib2\n\n        headers = {\n            'User-Agent' : \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; 
rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13\",\n 'Accept' : \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n 'Accept-Language' : \"Accept-Language: en-US,id-ID;q=0.8,id;q=0.6,en;q=0.4\"\n }\n req = urllib2.Request(url, None, headers)\n response = urllib2.urlopen(req)\n return response.read()\n \n # try:\n # response = urllib2.urlopen(req)\n # return response.read()\n # except urllib2.HTTPError, e:\n # if e.getcode() == 500:\n # return e.read()\n # else:\n # raise\n\n else:\n raise NotImplementedError\n\n","sub_path":"utils/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"179583364","text":"from bacteria_detector import config\nimport os\n\nswift_script_dir = os.path.dirname(os.path.realpath(__file__))\nswift_config_dir = os.path.join(swift_script_dir, 'config')\nconfig_file_path = os.path.join(swift_script_dir, \"config\", \"config.txt\")\n\n\ndef create_swift_conf_file():\n \"\"\" Create swift's config file \"\"\"\n config_params = f\"\"\"\n #!/usr/bash \n CUTADAPT=~/.conda/envs/qiime2-2020.8/bin/cutadapt\n VSEARCH=~/.conda/envs/qiime2-2020.8/bin/vsearch\n PRIMERS={swift_config_dir}/primers_16S_V1-9_anchored.fasta\n READLEN=130\n CLUSTERID=0.97\n CLASSIFIER_seq={swift_config_dir}/silva_132_99_16S.qza\n CLASSIFIER_tax={swift_config_dir}/consensus_taxonomy_7_levels.qza\n \"\"\"\n with open(config_file_path, 'w') as config_file:\n config_file.write(config_params)\n print(f'[---] Created swift\\'s config file in \"{config_file_path}\" ')\n\n\ndef get_run_cmd(sample_name: str, output_dir: str, input_dir: str) -> str:\n \"\"\" Get the command needed to run swift's algorithm on the given sample data \"\"\"\n output_dir = os.path.join(output_dir, sample_name)\n input_dir = os.path.join(input_dir, sample_name)\n swift_script_path = os.path.join(swift_script_dir, \"q2wkflow_v2.sh\")\n return f'{swift_script_path} {config_file_path} {input_dir} {output_dir}'\n","sub_path":"src/bacteria_detector/algorithms/swift/swift.py","file_name":"swift.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"614311793","text":"import os, json\r\nfrom pyrogram import Client, filters\r\nfrom pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\r\n\r\nAPI_ID = 7058291\r\nAPI_HASH = \"5b9ea5b6baa2905c7ae2822a04b8e835\"\r\nBOT_TOKEN = \"2131481070:AAFRCo1rLF-36poOF0oq6XLMIOXPZkfk8as\" #INSERISCI BOT TOKEN\r\nDEFAULT_ADMINS = [1940080581,910209349] #INSERISCI UNO O PIU' FOUNDER ID SEPARATI DA VIRGOLE\r\nCHANNEL = \"ndantiscamproof\" #INSERISCI CANALE ! IMPORTANTE ! 
INSERIRLO SENZA LA @ DAVANTI\r\n\r\n# CARICAMENTO SALVATAGGI #\r\nif os.path.exists(\"storage.json\"):\r\n with open(\"storage.json\", \"r+\") as f:\r\n SAVES = json.load(f)\r\nelse:\r\n SAVES = {\"Groups\": [], \"Scammers\": [], \"Staff\": DEFAULT_ADMINS}\r\n with open(\"storage.json\", \"w+\") as f:\r\n json.dump(SAVES, f)\r\n \r\n\r\ndef save():\r\n global SAVES\r\n with open(\"storage.json\", \"w+\") as f:\r\n json.dump(SAVES, f)\r\n \r\n\r\n###########################\r\n\r\nbot = Client(\"session\", API_ID, API_HASH, bot_token=BOT_TOKEN)\r\n\r\n@bot.on_message(filters.new_chat_members)\r\nasync def joinManager(client, message):\r\n global SAVES\r\n for user in message.new_chat_members:\r\n if user.is_self:\r\n if not message.chat.id in SAVES[\"Groups\"]:\r\n # MESSAGGIO APPENA METTI IL BOT NEL GRUPPO\r\n await message.reply_text(\"**Grazie per avermi aggiunto, per usare tutte le funzioni del bot mettimi admin e invia il comando /done.**\")\r\n elif message.chat.id in SAVES[\"Groups\"] and not user.is_bot:\r\n if user.id in SAVES[\"Scammers\"]:\r\n await client.kick_chat_member(message.chat.id, user.id)\r\n if user.username == None:\r\n if user.last_name == None:\r\n mention = f\"[{user.first_name}](tg://user?id={user.id})\"\r\n else:\r\n mention = f\"[{user.first_name} {user.last_name}](tg://user?id={user.id})\"\r\n else:\r\n mention = \"@\" + user.username\r\n # MESSAGGIO QUANDO ENTRA UNO SCAMMER\r\n await message.reply_text(f\"⚠️ [{user.first_name}](tg://user?id={user.id}) **era uno scammer ed è stato bannato ⚠️**\")\r\n \r\n \r\n \r\n\r\n@bot.on_message(filters.text)\r\nasync def commandsManager(client, message):\r\n global SAVES, CHANNEL \r\n if message.text.startswith(\"/addsupporter\"):\r\n if message.from_user.id in SAVES[\"Staff\"]:\r\n if message.reply_to_message == None:\r\n st = message.text.split(\" \")\r\n if st.__len__() == 2:\r\n try:\r\n usr = await client.get_users(st[1])\r\n if usr == None:\r\n await message.reply_text(\"**⚠️ Utente Non Trovato ⚠️**\")\r\n return\r\n else:\r\n ID = usr.id\r\n except:\r\n await message.reply_text(\"**⚠️ Utente Non Trovato ⚠️**\")\r\n return\r\n else:\r\n await message.reply_text(\"**⚠️ Specificare l' ID o la @ dell' utente ⚠️**\")\r\n return\r\n else:\r\n ID = message.reply_to_message.from_user.id\r\n if not ID in SAVES[\"Staff\"]:\r\n SAVES[\"Staff\"].append(ID)\r\n save()\r\n await message.reply_text(\"**👮Utente reso Supporter**\")\r\n try:\r\n await client.send_message(ID, \"**👮Sei un supporter ecco i tuoi comandi**\\n\\n🎛Comandi🎛\\n/netban id o @ link proof\\n/netunban id\")\r\n except:\r\n pass\r\n else:\r\n await message.reply_text(\"**👮Quest utente è già un supporter**\")\r\n elif message.text.startswith(\"/rmsupporter\"):\r\n if message.reply_to_message == None:\r\n st = message.text.split(\" \")\r\n if st.__len__() == 2:\r\n try:\r\n usr = await client.get_users(st[1])\r\n if usr == None:\r\n await message.reply_text(\"**🤔Utente Non Trovato**\")\r\n return\r\n else:\r\n ID = usr.id\r\n except:\r\n await message.reply_text(\"**🤔Utente Non Trovato**\")\r\n return\r\n else:\r\n await message.reply_text(\"**🔎Specificare l' ID o la @ dell' utente**\")\r\n return\r\n else:\r\n ID = message.reply_to_message.from_user.id\r\n if ID in SAVES[\"Staff\"]:\r\n SAVES[\"Staff\"].remove(ID)\r\n save()\r\n await message.reply_text(\"**😔Utente rimosso dalla lista dei supporter**\")\r\n try:\r\n await client.send_message(ID, \"**❌Sei stato rimosso dalla lista dei supporter**\")\r\n except:\r\n pass\r\n else:\r\n await message.reply_text(\"**⚠️ 
Quest utente non è un supporter⚠️**\")\r\n elif message.text.startswith(\"/netban\"):\r\n if message.from_user.id in SAVES[\"Staff\"]:\r\n st = message.text.split(\" \")\r\n if st.__len__() == 3 and st[2].startswith(\"http\"):\r\n if st[1].isnumeric():\r\n user = int(st[1])\r\n else:\r\n user = st[1]\r\n try:\r\n usr = await client.get_users(user)\r\n if usr == None:\r\n await message.reply_text(\"**⚠️ Utente Non Trovato ⚠️**\")\r\n return\r\n except:\r\n await message.reply_text(\"**⚠️ Utente Non Trovato ⚠️**\")\r\n return\r\n if not usr.id in SAVES[\"Scammers\"]:\r\n if message.from_user.username == None:\r\n if message.from_user.last_name == None:\r\n admin = f\"[{message.from_user.first_name}](tg://user?id={message.from_user.id})\"\r\n else:\r\n admin = f\"[{message.from_user.first_name} {message.from_user.last_name}](tg://user?id={message.from_user.id})\"\r\n else:\r\n admin = \"@\" + message.from_user.username\r\n msg = await message.reply_text(\"__Netban in corso...__\")\r\n if usr.username == None:\r\n if usr.last_name == None:\r\n mention = f\"[{usr.first_name}](tg://user?id={usr.id})\"\r\n else:\r\n mention = f\"[{usr.first_name} {usr.last_name}](tg://user?id={usr.id})\"\r\n else:\r\n mention = \"@\" + usr.username\r\n c = 0\r\n for group in SAVES[\"Groups\"]:\r\n try:\r\n await client.kick_chat_member(group, usr.id)\r\n # MESSAGGIO NETBAN\r\n await client.send_message(group, f\"⚠️UTENTE NETBANNATO⚠️\\n\\n👤Utente:{mention}\\n🆔 ID:`{usr.id}`\\n\\n🚫Netbannato da🚫\\n👮🏻‍♂Staff:{admin}\\n🆔:`{message.from_user.id}`\", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(\"📝 Prove\", url=st[2])]]))\r\n c += 1\r\n except:\r\n await client.send_message(group, \"**⚠️ Per funzionare al meglio il bot ha bisogno dei permessi admin ⚠️**\")\r\n await msg.edit(f\"**✅ Utente netbannato correttamente in {c} gruppi ✅**\")\r\n # MESSAGGIO NETBAN CAMBIARE ANCHE QUI\r\n await client.send_message(CHANNEL, f\"⚠️UTENTE NETBANNATO⚠️\\n\\n👤Utente:{mention}\\n🆔 ID:`{usr.id}`\\n\\n🚫Netbannato da🚫\\n👮🏻‍♂Staff:{admin}\\n🆔:`{message.from_user.id}`\", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(\"📝 Prove\", url=st[2])]]))\r\n SAVES[\"Scammers\"].append(usr.id)\r\n save()\r\n else:\r\n await message.reply_text(\"**⚠️ Utente già netbannato ⚠️**\")\r\n else:\r\n await message.reply_text(\"**⚠️ Sintassi Errata ⚠️\\nes»`/netban id o @ del utente link proof`**\")\r\n elif message.text.startswith(\"/netunban\"):\r\n if message.from_user.id in SAVES[\"Staff\"]:\r\n st = message.text.split(\" \")\r\n if st.__len__() == 2:\r\n if st[1].isnumeric():\r\n user = int(st[1])\r\n else:\r\n user = st[1]\r\n try:\r\n usr = await client.get_users(user)\r\n if usr == None:\r\n await message.reply_text(\"**⚠️ Utente Non Trovato ⚠️**\")\r\n return\r\n except:\r\n await message.reply_text(\"**⚠️ Utente Non Trovato ⚠️**\")\r\n return\r\n if usr.id in SAVES[\"Scammers\"]:\r\n if message.from_user.username == None:\r\n if message.from_user.last_name == None:\r\n admin = f\"[{message.from_user.first_name}](tg://user?id={message.from_user.id})\"\r\n else:\r\n admin = f\"[{message.from_user.first_name} {message.from_user.last_name}](tg://user?id={message.from_user.id})\"\r\n else:\r\n admin = \"@\" + message.from_user.username\r\n msg = await message.reply_text(\"__NetUnban in corso...__\")\r\n if usr.username == None:\r\n if usr.last_name == None:\r\n mention = f\"[{usr.first_name}](tg://user?id={usr.id})\"\r\n else:\r\n mention = f\"[{usr.first_name} {usr.last_name}](tg://user?id={usr.id})\"\r\n else:\r\n mention = \"@\" 
+ usr.username\r\n                    c = 0\r\n                    for group in SAVES[\"Groups\"]:\r\n                        try:\r\n                            await client.unban_chat_member(group, usr.id)\r\n                            # MESSAGGIO SBAN\r\n                            await client.send_message(group, f\"✅ UTENTE SBANNATO ✅\\n\\n👤Utente:{mention}\\n🆔 ID: `{usr.id}`\\n\\n👀Sbannato da👀\\n👮🏻‍♂Staff:{admin}\\n🆔:`{message.from_user.id}`\")\r\n                            c += 1\r\n                        except:\r\n                            await client.send_message(group, \"**⚠️ Per funzionare al meglio il bot ha bisogno dei permessi admin ⚠️**\")\r\n                    await msg.edit(f\"**✅ Utente sbannato correttamente in {c} gruppi ✅**\")\r\n                    # MESSAGGIO SBAN CAMBIARE ANCHE QUI\r\n                    await client.send_message(CHANNEL, f\"✅ UTENTE SBANNATO ✅\\n\\n👤Utente:{mention}\\n🆔 ID: `{usr.id}`\\n\\n👀Sbannato da👀\\n👮🏻‍♂Staff:{admin}\\n🆔:`{message.from_user.id}`\")\r\n                    SAVES[\"Scammers\"].remove(usr.id)\r\n                    save()\r\n                else:\r\n                    await message.reply_text(\"**⚠️ Questo utente non è netbannato ⚠️**\")\r\n            else:\r\n                await message.reply_text(\"**⚠️ Sintassi Errata ⚠️\\nes»`/netunban id o @ dell'utente`**\")\r\n    elif message.text.startswith(\"/check\"):\r\n        st = message.text.split(\" \", 1)\r\n        if st.__len__() == 2:\r\n            if st[1].isnumeric():\r\n                ID = int(st[1])\r\n            else:\r\n                try:\r\n                    usr = await client.get_users(st[1])\r\n                    if usr == None:\r\n                        await message.reply_text(\"**⚠️ Utente non trovato ⚠️\\nes»`/check id o @`**\")\r\n                        return\r\n                    else:\r\n                        ID = usr.id\r\n                except:\r\n                    await message.reply_text(\"**⚠️ Utente non trovato ⚠️\\nes»`/check id o @`**\")\r\n                    return\r\n            if ID in SAVES[\"Scammers\"]:\r\n                await message.reply_text(\"**❌Utente presente nella blacklist**\")\r\n            else:\r\n                await message.reply_text(\"**✅Utente non è presente nella blacklist**\")\r\n    elif message.chat.type == \"private\":\r\n        if message.text == \"/start\":\r\n            if message.chat.username == None:\r\n                if message.chat.last_name == None:\r\n                    mention = f\"[{message.chat.first_name}](tg://user?id={message.chat.id})\"\r\n                else:\r\n                    mention = f\"[{message.chat.first_name} {message.chat.last_name}](tg://user?id={message.chat.id})\"\r\n            else:\r\n                mention = \"@\" + message.chat.username\r\n            # MESSAGGIO BENVENUTO CON BOTTONI\r\n            await message.reply_text(f\"**👋🏻Benvenuto** {mention} **in LTSAntiScam**\\n\\n__Questo progetto è stato creato per mantenere la tranquillità nel mondo della compravendita, clicca i bottoni per orientarti!__\", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(\"➕ Aggiungimi in un gruppo ➕\", url=\"https://t.me/\" + (await client.get_me()).username + \"/?startgroup=startgroup\")], [InlineKeyboardButton(\"🔎 Check Scammer🔎\", \"check\")], [InlineKeyboardButton(\"👮Assistenza👮\", url=\"https://t.me/Ciro_Rolex_x\")], [InlineKeyboardButton(\"📢 Canale📢 \", url=\"https://t.me/\" + CHANNEL), InlineKeyboardButton(\"👑 Staff👑\", \"staff\")], [InlineKeyboardButton(\"👮‍♂️Founder👮‍♂️\", url=\"https://t.me/Ciro_Rolex_x\")]]))\r\n        elif message.text.startswith(\"/done\"):\r\n            if not message.chat.id in SAVES[\"Groups\"]:\r\n                if (await client.get_chat_member(message.chat.id, \"me\")).status == \"administrator\":\r\n                    await message.reply_text(\"**😁Gruppo aggiunto correttamente, da ora bannerò tutti gli scammer**\")\r\n                    SAVES[\"Groups\"].append(message.chat.id)\r\n                    save()\r\n                else:\r\n                    await message.reply_text(\"**⚠️ Mettere il bot amministratore ⚠️**\")\r\n\r\n\r\n@bot.on_callback_query()\r\nasync def callbackQueryManager(client, query):\r\n    global SAVES, CHANNEL\r\n    if query.data == \"back\":\r\n        if query.message.chat.username == None:\r\n            if query.message.chat.last_name == None:\r\n                mention = 
f\"[{query.message.chat.first_name}](tg://user?id={query.message.chat.id})\"\r\n else:\r\n mention = f\"[{query.message.chat.first_name} {query.message.chat.last_name}](tg://user?id={query.message.chat.id})\"\r\n else:\r\n mention = \"@\" + query.message.chat.username\r\n # MESSAGGIO BENVENUTO CON BOTTONI CAMBIARE ANCHE QUI\r\n await query.message.edit(f\"**👋🏻Benvenuto** {mention} **in LTSAntiScam**\\n\\n__Questo progetto e stato creato per tenere tranquillita nel mondo della compravendita clicca i bottoni per orientarti!__\", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(\"➕ Aggiungimi in un gruppo ➕\", url=\"https://t.me/\" + (await client.get_me()).username + \"/?startgroup=startgroup\")], [InlineKeyboardButton(\"🔎 Check Scammer🔎\", \"check\")], [InlineKeyboardButton(\"👮Assistenza👮\", url=\"https://t.me/Ciro_Rolex_x\")], [InlineKeyboardButton(\"📢 Canale📢 \", url=\"https://t.me/\" + CHANNEL), InlineKeyboardButton(\"👑 Staff👑\", \"staff\")], [InlineKeyboardButton(\"👮‍♂️Founder👮‍♂️\", url=\"https://t.me/Ciro_Rolex_x\")]]))\r\n elif query.data == \"staff\":\r\n # MESSAGGIO LISTA STAFF\r\n msg = \"**👑 LISTA STAFF 👑**\\n\"\r\n for admin in SAVES[\"Staff\"]:\r\n try:\r\n usr = await client.get_users(admin)\r\n if usr == None:\r\n canMention = False\r\n else:\r\n canMention = True\r\n except:\r\n canMention = False\r\n if canMention:\r\n if usr.username == None:\r\n if usr.last_name == None:\r\n mention = f\"[{usr.first_name}](tg://user?id={usr.id})\"\r\n else:\r\n mention = f\"[{usr.first_name} {usr.last_name}](tg://user?id={usr.id})\"\r\n else:\r\n mention = \"@\" + usr.username\r\n else:\r\n mention = \"???\"\r\n msg += f\"\\n{mention} | `{admin}`\"\r\n await query.message.edit(msg, reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(\"🔙 Indietro\", \"back\")]]))\r\n elif query.data == \"check\":\r\n # MESSAGGIO CHECK SCAMMER\r\n await query.message.edit(\"**👀Per controllare se un utente è presente nella nostra blacklist devi semplicemente digitare /check [@ o ID]!\\n\\nEsempio: /check 12312312311**\", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(\"🔙 Indietro\", \"back\")]]))\r\n \r\n\r\nprint(\"Bot Avviato Correttamente!\")\r\n\r\nbot.run() ","sub_path":"antiscam.py","file_name":"antiscam.py","file_ext":"py","file_size_in_byte":16844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"27712059","text":"\nimport numpy as np\n\n\ndef figure_to_dynamic_measure(figure, measure):\n result = []\n dynamic = figure_to_dynamic(figure)\n if dynamic != None:\n for note, is_rest in measure:\n result.append(dynamic)\n else:\n if '<' in figure:\n start, end = figure.split('<')\n elif '>' in figure:\n start, end = figure.split('>')\n else:\n raise Exception(f'Unknown symbol {figure}')\n start, end = figure_to_dynamic(start), figure_to_dynamic(end)\n dynamics = np.linspace(start, end, len(measure)).astype(int)\n for dynamic in dynamics:\n result.append(dynamic)\n\n return result\n\ndef figure_to_dynamic(figure):\n if figure == 'ppp':\n return 20\n if figure == 'pp':\n return 40\n if figure == 'p':\n return 60\n if figure == 'mf':\n return 80\n if figure == 'f':\n return 100\n if figure == 'ff':\n return 110\n if figure == 'fff':\n return 120\n else:\n return None\n\n\ndef dynamic_to_figure(d):\n\n if d == 20:\n return 'ppp'\n if d == 40:\n return 'pp'\n if d == 60:\n return 'p'\n if d == 80:\n return 'mf'\n if d == 100:\n return 'f'\n if d == 110:\n return 'ff'\n if d == 120:\n return 'fff'\n else:\n return None\n\ndef 
parse_default_dynamics(parsed_rythm):\n result = []\n for i, voice in enumerate(parsed_rythm):\n V = []\n for j, chord in enumerate(voice):\n C = []\n for k, note in enumerate(chord):\n C.append(80)\n V.append(C)\n result.append(V)\n return result[::-1]\n\n\n\n\n\ndef dynamics_to_string(dynamics):\n result = \"\"\n for voice in dynamics[::-1]:\n result_voice = \"\"\n for measure in voice:\n for note in measure:\n result_voice += dynamic_to_figure(note) + ' '\n result_voice = result_voice[:-1]\n result_voice += ' | '\n result_voice = result_voice[:-3]\n result += result_voice + ' +\\n'\n result = result[:-3]\n return result\n\n\n\n\ndef parse_dynamics_with_rythm(dynamics, parsed_rythm):\n parsed_rythm = parsed_rythm[::-1]\n if dynamics is None:\n return parse_default_dynamics(parsed_rythm)\n import re\n dynamics = ' '.join(dynamics.split())\n dynamics = dynamics.replace('\\n', '')\n parsed_dynamic = ''\n\n voices = [r for r in re.split('(\\s?)+\\+(\\s?)+', dynamics) if r != '']\n result = []\n for idx_voice, voice in enumerate(voices):\n chords = [r for r in re.split('(\\s?)+\\|(\\s?)+', voice) if r != '']\n res = []\n\n a = len(chords)\n b = len(parsed_rythm[idx_voice])\n assert a == b, \\\n f\"Error parsing style measure at : \\n '{parsed_dynamic} ^' \\n with error : different measure length dynamic ({a}) != ({b}) rythm\"\n\n for idx_chord, chord in enumerate(chords):\n figures = chord.split(' ')\n if len(figures) == len(parsed_rythm[idx_voice][idx_chord]) and len(parsed_rythm[idx_voice][idx_chord]) > 1:\n new_res = []\n for figure in figures:\n try:\n el = figure_to_dynamic(figure)\n new_res.append(el)\n parsed_dynamic += \" \" + str(figure)\n except Exception as e:\n raise Exception(f\"Error parsing {figure} in dynamics at : \\n '{parsed_dynamic} ^' \\n with error : {str(e)}\")\n res.append(new_res)\n elif len(figures) == 1:\n try:\n el = figure_to_dynamic_measure(figures[0], parsed_rythm[idx_voice][idx_chord])\n res.append(el)\n parsed_dynamic += \" \" + str(figures[0])\n except Exception as e:\n raise Exception(\n f\"Error parsing {figures[0]} in dynamics at : \\n '{parsed_dynamic} ^' \\n with error : {str(e)}\")\n else:\n a = len(figures)\n b = len(parsed_rythm[idx_voice][idx_chord])\n raise Exception(\n f\"Error parsing measure in dynamics at : \\n '{parsed_dynamic} ^' \\n with error : Length of rythm ({a}) and dynamic ({b}) not the same \")\n parsed_dynamic += \" | \"\n parsed_dynamic = parsed_dynamic[:-3]\n parsed_dynamic += \" + \\n\"\n result.append(res)\n\n return result[::-1]\n\n\n","sub_path":"musiclang/compose/dynamics.py","file_name":"dynamics.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"64465791","text":"#!/usr/bin/env python3\n\n# references\n# https://docs.python.org/3.5/library/socket.html#example\n\n# lab solution\nimport socket\nimport time\nfrom multiprocessing import Process\n\nHOST = \"\"\nPORT = 8081\nBUFFER_SIZE = 1024\n\ndef handle_echo(conn, addr):\n with conn:\n #print(conn)\n full_data = b\"\"\n while True:\n data = conn.recv(BUFFER_SIZE)\n\n print(\"received {}\".format(data))\n if not data:\n break\n full_data += data\n time.sleep(2)\n print(\"done nap\")\n print(\"sending data...\")\n conn.sendall(full_data)\n conn.shutdown(socket.SHUT_WR)\n print(\"data sent\")\n\n\ndef main():\n\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind((HOST, PORT))\n\n s.listen(1)\n\n while 
True:\n conn, addr = s.accept()\n #conn.setblocking(0)\n p = Process(target=handle_echo, args=(conn, addr))\n p.daemon = True\n p.start()\n print(\"Started process: \", p)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n\n# # ------------------------------------\n# # My solution\n# import socket\n#\n# HOST = ''\n# PORT = 8001\n#\n# with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n# # socket.setsockopt(level, optname, value)\n# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n# s.bind((HOST, PORT))\n# s.listen(1)\n# print(\"Listening...\")\n#\n# # addr is pair (host, port) where port is the port used by client\n# conn, addr = s.accept()\n# with conn:\n# print('Connected by', addr)\n# data = conn.recv(1024)\n# print(\"Server Side: {}\".format(data))\n# conn.sendall(data)\n","sub_path":"multi_echo_server.py","file_name":"multi_echo_server.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"293781563","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom networkinstitute.models import CustomUser\n\n# Create your views here.\n@login_required\ndef home(request):\n\tmember = CustomUser.objects.get(id=request.user.id)\n\tfull_name = member.get_full_name()\n\temail = member.email\n\tcontext = {'full_name': full_name,\n\t\t\t\t'email': email,}\n\treturn render(request, \"userprofile/home.html\", context)","sub_path":"socialwebproject/userprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"134302386","text":"import openpyxl\r\nimport json\r\n\r\n# Open json file with utf-8 encoding; insert your own json filename\r\nwith open(\"College_Basketball__80_League_295_2057_CT_summary.json\", encoding = 'utf-8-sig') as json_file: \r\n data = json.load(json_file)\r\n\r\n# Initialize list of games (will be tuple of (winning team, losing team))\r\ngames = []\r\n# Initalize dictionary of teams mapping their id to their name\r\nteams = dict()\r\n# Dictionary of every team's rating; key = team name, value = rating\r\nratings = dict()\r\n# Dictionary of every team's record; key = team name, value = list of [wins, losses]\r\nrecord = dict()\r\n# Dictionary of every team's expected wins; key = team name, value = expected wins\r\nexpected = dict()\r\n# Dictionary of every team's new rating calculated; key = team name, value = new rating\r\nnew_ratings = dict()\r\n# Dictionary of every team's info; key = team name, value = list of [win %, win ratio, strength of schedule]\r\ninfo = dict()\r\n# Value to compare every rating to and then do recursion\r\nDELTA = 0.0001\r\n# Main boolean used with while loop for recursion based on flag\r\ndone = False\r\n# If all values are within DELTA (i.e. 
should recursion finish)\r\nflag = True\r\n# Extract information that we need from the data\r\nfor t in data[\"teams\"]:\r\n # teams[id] = name\r\n teams[t[\"tid\"]] = t[\"region\"]\r\n # Initialize record to 0-0\r\n record[t[\"region\"]] = [0, 0]\r\n # Always start with 100 as every team's rating (using iteration to solve the recursive problem)\r\n ratings[t[\"region\"]] = 100\r\n\r\n# Create a Workbook\r\nwb = openpyxl.Workbook()\r\n# Create sheet to output games to so that postseason games can be easily added\r\nout = wb.active\r\nout.title = \"Games\"\r\n# Row counter\r\nr = 1\r\n# First row\r\nout.cell(row = r, column = 1).value = 'Winning Team'\r\nout.cell(row = r, column = 2).value = 'Winning Score'\r\nout.cell(row = r, column = 3).value = 'Losing Team'\r\nout.cell(row = r, column = 4).value = 'Losing Score'\r\n# Go through all the games and add tuple of (winning team, losing team) to list of games\r\n# In addition output the games to a sheet so that postseason games can be added with ease\r\nfor g in data[\"games\"]:\r\n winnerid = g[\"won\"][\"tid\"]\r\n loserid = g[\"lost\"][\"tid\"]\r\n games.append((teams[winnerid], teams[loserid]))\r\n # Incrememnt record of each team\r\n record[teams[winnerid]][0] += 1\r\n record[teams[loserid]][1] += 1\r\n # If team is undefeated, we'll get a divide by 0 error, so we set the ratio to 25\r\n # Note: this formula is most effective when there are no undefeated or winless teams\r\n # and a chain of wins (or ties) can be made from every team to any other team\r\n if record[teams[winnerid]][1] == 0:\r\n ratio = 25\r\n # If the team is winless, everything would automatically be 0, so set the ratio to 1/25\r\n elif record[teams[winnerid]][0] == 0:\r\n ratio = 1/25\r\n # Win ratio = wins / losses\r\n else:\r\n ratio = record[teams[winnerid]][0] / record[teams[winnerid]][1]\r\n # Add the team's win %, win ratio and sos is intialized to 0 (to be summed later)\r\n teaminfo = [record[teams[winnerid]][0] / (record[teams[winnerid]][0] + record[teams[winnerid]][1]), ratio, 0]\r\n # Add the list of team info to the info dict\r\n info[teams[winnerid]] = teaminfo\r\n # Repeat for losing team\r\n if record[teams[loserid]][1] == 0:\r\n ratio = 25\r\n elif record[teams[loserid]][0] == 0:\r\n ratio = 1/25\r\n # Win ratio = wins / losses\r\n else:\r\n ratio = record[teams[loserid]][0] / record[teams[loserid]][1]\r\n # Add the team's win %, win ratio and sos is intialized to 0 (to be summed later)\r\n teaminfo = [record[teams[loserid]][0] / (record[teams[loserid]][0] + record[teams[loserid]][1]), ratio, 0]\r\n # Add the list of team info to the info dict\r\n info[teams[loserid]] = teaminfo\r\n out.cell(row = r + 1, column = 1).value = teams[winnerid]\r\n out.cell(row = r + 1, column = 2).value = g[\"won\"][\"pts\"]\r\n out.cell(row = r + 1, column = 3).value = teams[loserid]\r\n out.cell(row = r + 1, column = 4).value = g[\"lost\"][\"pts\"]\r\n r += 1\r\n\r\n# Recursion for getting accurate ratings\r\nwhile not done:\r\n # Initialize the flag to True each time\r\n flag = True\r\n \r\n # Clear expected wins each iteration\r\n expected.clear()\r\n # For every game, calculate:\r\n for game in games:\r\n # Weighting factor (1 divided by the sum of the ratings of the 2 teams)\r\n wf = 1 / (ratings[game[0]] + ratings[game[1]])\r\n # Check to see if each team is in expected wins dictionary\r\n if game[0] in expected:\r\n # Multiply team's rating by weighting factor and add to sum\r\n expected[game[0]] += (ratings[game[0]] * wf)\r\n else:\r\n # Multiply team's rating by 
weighting factor and initialize as expected wins\r\n expected[game[0]] = (ratings[game[0]] * wf)\r\n if game[1] in expected:\r\n # Multiply team's rating by weighting factor and add to sum\r\n expected[game[1]] += (ratings[game[1]] * wf)\r\n else:\r\n # Multiply team's rating by weighting factor and initialize as expected wins\r\n expected[game[1]] = (ratings[game[1]] * wf)\r\n \r\n # For every team, calculate:\r\n for key in ratings:\r\n # New rating for the team equals the team's wins divided by expected wins multiplied by the old rating\r\n new_ratings[key] = (record[key][0] / expected[key]) * ratings[key]\r\n # Update the SOS for the team\r\n info[key][2] = new_ratings[key] / info[key][1]\r\n # If the difference between old rating and new rating <= DELTA\r\n # If flag is true, that means so far every team's new rating has been within DELTA\r\n # (since flag is initialized to true)\r\n if abs(ratings[key] - new_ratings[key]) <= DELTA and abs(record[key][0] - expected[key]) <= DELTA and flag:\r\n done = True\r\n # If the difference is greater than DELTA, we must continue the recursion\r\n # If flag is false, one team has already failed the DELTA test and we must continue the recursion\r\n else:\r\n flag = False\r\n done = False\r\n # After going through all teams, update ratings\r\n ratings = new_ratings\r\n\r\n# Scale the ratings to an average of 100\r\nfor i in range(10):\r\n scale_wins = 0\r\n for key in ratings:\r\n scale_wins += 100 / (100 + ratings[key])\r\n # The denominator should be the number of teams in the league divided by 2\r\n scale = scale_wins / 50\r\n \r\n # Adjust every team's rating and SOS according to scale\r\n for key in ratings:\r\n ratings[key] *= scale\r\n info[key][2] = ratings[key] / info[key][1]\r\n\r\n# Sort the teams by their rating\r\nsortedratings = sorted(ratings.items(), key=lambda kv: kv[1], reverse = True)\r\n# Create new sheet for output\r\nwb.create_sheet('Output')\r\n# For outputting the results into the spreadsheet\r\nout = wb.get_sheet_by_name('Output')\r\nfor r in range(len(sortedratings) + 1):\r\n # Header row\r\n if r == 0:\r\n out.cell(row = r + 1, column = 1).value = 'Rank'\r\n out.cell(row = r + 1, column = 2).value = 'Team'\r\n out.cell(row = r + 1, column = 3).value = 'Rating'\r\n out.cell(row = r + 1, column = 4).value = 'Win % Rank'\r\n out.cell(row = r + 1, column = 5).value = 'Record'\r\n out.cell(row = r + 1, column = 6).value = 'Win %'\r\n out.cell(row = r + 1, column = 7).value = 'Win Ratio'\r\n out.cell(row = r + 1, column = 8).value = 'SOS Rank'\r\n out.cell(row = r + 1, column = 9).value = 'SOS'\r\n else:\r\n # Nine columns\r\n for col in range(9):\r\n # Col 0 is the rank\r\n if col == 0:\r\n out.cell(row = r + 1, column = col + 1).value = r\r\n # Col 1 is the team name, col 2 is the rating\r\n elif col < 3:\r\n out.cell(row = r + 1, column = col + 1).value = sortedratings[r - 1][col - 1]\r\n # Formula to calculate the rank of winning %\r\n elif col == 3:\r\n out.cell(row = r + 1, column = col + 1).value = \"=RANK(F\" + str(r + 1) + \\\r\n \",F2:F\" + str(len(sortedratings) + 1) + \")\"\r\n # Output record\r\n elif col == 4:\r\n out.cell(row = r + 1, column = col + 1).value = str(record[sortedratings[r - 1][0]][0]) \\\r\n + \"-\" + str(record[sortedratings[r - 1][0]][1])\r\n # Output win % (col 5) and win ratio (col 6)\r\n elif col < 7:\r\n out.cell(row = r + 1, column = col + 1).value = info[sortedratings[r - 1][0]][col - 5]\r\n # Formula to calculate the SOS rank\r\n elif col == 7:\r\n out.cell(row = r + 1, column = col + 
1).value = \"=RANK(I\" + str(r + 1) + \\\r\n \",I2:I\" + str(len(sortedratings) + 1) + \")\"\r\n # Output SOS (col 8)\r\n else:\r\n out.cell(row = r + 1, column = col + 1).value = info[sortedratings[r - 1][0]][col - 6]\r\n\r\n# Save the sheet with the output\r\nwb.save('Bradley-Terry Spreadsheet JSON NCBCA.xlsx')\r\n","sub_path":"Bradley-Terry Sports Model JSON version.py","file_name":"Bradley-Terry Sports Model JSON version.py","file_ext":"py","file_size_in_byte":9049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"556345579","text":"import boto3\r\nimport botocore\r\nfrom dotenv import dotenv_values, find_dotenv, load_dotenv\r\n\r\nbucket = 'loopqprize'\r\n\r\ntry:\r\n s3 = boto3.resource('s3')\r\n\r\n try:\r\n s3.meta.client.head_bucket(Bucket=bucket)\r\n print(\"Bucket connection stable!\\n\")\r\n except botocore.exceptions.ClientError as e:\r\n error_code = int(e.response['Error']['Code'])\r\n if error_code == 403:\r\n print(\"Private Bucket. Forbidden Access!\")\r\n elif error_code == 404:\r\n print(\"Bucket Does Not Exist! Contact the project owner!\")\r\n\r\nexcept KeyError as e:\r\n print(\".env must be configured!\")\r\n print(f\"Missing key: {e.args}\")\r\n","sub_path":"src/tests/test_aws_connection.py","file_name":"test_aws_connection.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"512712655","text":"import time\nfrom celery import Celery\n\n# for running periodic tasks\nfrom celery.decorators import periodic_task\n\n# for giving time differences\nfrom datetime import timedelta\n\n# to implement the schdule in crontab format\nfrom celery.schedules import crontab\n\n# define a celery module\napp = Celery('tasks', backend='mongodb://localhost:27017/celery', broker='redis://localhost:6379/0')\n\n# function to be performed with celery\n# define the task with task decorator:\n# you can give in your own name or celery will pick one for you. \n@app.task(name='tasks.add')\ndef add(x, y):\n total = x + y\n print('{} + {} = {}'.format(x, y, total))\n time.sleep(10)\n return total\n\n\ndef backoff(attempts):\n \"\"\"\n 1, 2, 4, 8, 16 (attempts would be in this sequence.)\n \"\"\"\n return 4 ** attempts\n\n# A task being bound means the first argument to the task will always be the task instance (self), just like Python bound methods\n\"\"\"\nlogger = get_task_logger(__name__)\n\n@task(bind=True)\ndef add(self, x, y):\n logger.info(self.request.id)\n\"\"\"\n# max_tretires is an attribute within the decorator app.task that retries to run the function in case of an exception. \n# The soft time limit for this task. When not set the workers default is used.\n@app.task(bind=True, max_retries=4, soft_time_limit=5)\ndef data_extractor(self):\n try:\n for i in range(1, 11):\n print('Crawling HTML DOM!')\n if i == 5:\n raise ValueError('Crawling Index Error')\n except Exception as exc:\n print('There was an exception lets rety after 5 seconds')\n # trigger the retry by invoking the task self object. 
\n # Countdown usage http://docs.celeryproject.org/en/latest/userguide/tasks.html#using-a-custom-retry-delay\n raise self.retry(exc=exc, countdown=backoff(self.request.retries))\n\n@periodic_task(run_every=(crontab(minute=0, hour=2)), name=\"tasks.send_mail_from_queue\")\ndef send_mail_from_queue():\n try:\n message_sent = \"example.email\"\n print(\"Email message successfully send, [{}]\".format(message_sent))\n finally:\n print(\"release resources\")","sub_path":"C5_Periodic_tasks_in_celery_crontab/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"234045121","text":"import torch\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport os\nimport SimpleITK as sitk\nimport PIL as Image\n\ndef get_data_paths(data_dir):\n # load each type individually\n flair, t1w, t1wce, t2, labels = np.loadtxt(data_dir,\n dtype=np.str_,\n unpack=True,\n delimiter=\",\")\n # convert labels to ints\n labels = labels.astype(np.int_)\n return flair, t1w, t1wce, t2, labels\n\n# very quickly made and tested dataloader. It's not very robust rn but its functional\n# instancing the class: foo = flair_dataset(\"./data/train/\", some_transform)\n# get data by indexing: foo[[1,2,3]] or foo[8] \n# returns [ [imglist], [labellist] ] \nclass flair_dataset(Dataset):\n def __init__(self, data_table, transform=None):\n self.paths, self.labels = data_table\n self.transform=transform\n \n def __len__(self):\n return len(self.paths)\n \n def __getitem__(self, item):\n if torch.is_tensor(item):\n item = item.tolist()\n \n # get list if multiple indeces are passed\n if isinstance(item, list):\n result = [sitk.GetArrayFromImage(sitk.ReadImage(self.paths[i])) for i in item]\n else:\n result = [sitk.GetArrayFromImage(sitk.ReadImage(self.paths[item]))]\n \n # convert to tensor to support batch transforms\n result = torch.FloatTensor(result)\n \n if self.transform is not None:\n result = self.transform(result)\n \n return result, self.labels[item]\n \n ","sub_path":"torch_tools/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"193969572","text":"# Import Required Modules\nimport csv\nimport os\nimport tkinter as tk\nfrom datetime import datetime\nfrom tkinter import filedialog\nfrom tkinter import font\nfrom tkinter import *\n\n# Create GUI window and set window title\nbuilder_win = tk.Tk()\nbuilder_win.title(\"CSV Report Builder\")\n\n# Set minimum window size\nbuilder_win.minsize(200, 400)\n\n# Create function to allow user to select file\ndef select_file():\n # Get current working directory\n current_wording_directory = os.getcwd()\n\n builder_win.filepath = filedialog.askopenfilename(initialdir = current_wording_directory, \n title = \"Select CSV File\", \n filetypes = ((\"CSV Files\",\"*.csv\"),))\n \n # Check to make sure that the user has actually selected a CSV file\n if builder_win.filepath != '':\n # Get the filename that is being opened without the file extensions\n builder_win.filename = builder_win.filepath.split('/')\n builder_win.filename = builder_win.filename[-1].split('.')[0]\n\n # Open given file\n with open(builder_win.filepath) as csv_file:\n csv_file_reader = csv.reader(csv_file)\n data = [row for row in csv_file_reader]\n\n # Clear Any Items from List Box\n csv_col_select_listbox.delete(0, END)\n\n # Add all 
of the different headers in the CSV file to the listbox for the user to see\n        for index, col in enumerate(data[0]):\n            csv_col_select_listbox.insert(index, col)\n\n        # Change the button to allow the user to select an output directory\n        select_file_output_path_button['state'] = 'normal'\n    \n# Create function to allow user to select destination directory\ndef select_directory():\n    # Get current working directory\n    current_wording_directory = os.getcwd()\n\n    builder_win.dirpath = filedialog.askdirectory(initialdir = current_wording_directory, \n                                                    title = \"Select a Directory\")\n\n    if builder_win.dirpath != '':\n        # Change the generate report button to normal to allow the user to generate their report\n        generate_report_button['state'] = 'normal'\n\n# Create a function to generate a report in the specified directory\ndef create_report():\n    # Create filepath for output specified by user\n    # The name of the file is the original filename with 'output_' and the date the report is being generated prepended\n    # (the timestamp is formatted without colons so the name stays a valid filename on Windows)\n    name_of_output = \"output_\" + datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + '_' + builder_win.filename\n    complete_name_of_output = os.path.join(builder_win.dirpath, name_of_output + '.csv')\n\n    # Get the user's selection(s) from the Listbox\n    user_entry = csv_col_select_listbox.curselection()\n\n    # Create a file with the generated filename\n    file_output = open(complete_name_of_output, \"w\")\n    \n    # Initialize a variable to store the string that is to be written to the new file\n    string_to_write_to_file = ''\n\n    # Convert tuple of user selections from tuple into list\n    user_entry_list = list(user_entry)\n\n    # Open given file\n    with open(builder_win.filepath) as csv_file:\n        csv_file_reader = csv.reader(csv_file)\n        data = [row for row in csv_file_reader]\n\n    # For every row in the CSV file, select the columns specified by the user\n    for indx_row, row in enumerate(data):\n        for indx_col, user_index in enumerate(user_entry_list):\n\n            # If this is the last column in the row, don't add a comma at the end\n            if indx_col == len(user_entry_list) - 1:\n                string_to_write_to_file += row[int(user_index)]\n            else:\n                string_to_write_to_file += row[int(user_index)] + ','\n\n        # If this is the last row in the CSV file, don't add a new line\n        if indx_row != len(data) - 1:\n            string_to_write_to_file += '\\n'\n    \n    # Strip any additional newline characters from the output (str.strip returns a new string, so reassign it)\n    string_to_write_to_file = string_to_write_to_file.strip()\n\n    # Write the string to the output file and close it\n    file_output.write(string_to_write_to_file)\n    file_output.close()\n\n# Create label and button for selecting a CSV file\nselect_file_path_label = tk.Label(builder_win, text = \"1. Select a CSV File\", bg=\"#5a5a5a\", fg=\"#ffffff\", font=\"-weight bold\", padx = 15)\nselect_file_path_label.grid(row = 0, column = 0, columnspan = 2, sticky='nesw')\n\nselect_file_path_button = tk.Button(builder_win, text = \"Select a CSV File\", command = select_file)\nselect_file_path_button.grid(row = 1, column = 0, columnspan = 2, sticky='nesw', padx = 15)\n\n# Create label and button for selecting output location of report\nselect_file_output_path_label = tk.Label(builder_win, text = \"2. 
Select Save Location\", bg=\"#5a5a5a\", fg=\"#ffffff\", font=\"-weight bold\", padx = 15)\nselect_file_output_path_label.grid(row = 2, column = 0, columnspan = 2, sticky='nesw', pady = (15,0))\n\nselect_file_output_path_button = tk.Button(builder_win, text = \"Select a Directory\", command = select_directory, state = \"disabled\")\nselect_file_output_path_button.grid(row = 3, column = 0, columnspan = 2, sticky='nesw', padx = 15)\n\n# Create label for selecting desired column section\ngenerate_report_label = tk.Label(builder_win, text = \"3. Select Desired Columns\", bg=\"#5a5a5a\", fg=\"#ffffff\", font=\"-weight bold\", padx = 15)\ngenerate_report_label.grid(row = 4, column = 0, columnspan=2, sticky='nesw', pady = (15,0))\n\n# Create label to display CSV headers\ncsv_columns_label = tk.Label(builder_win, text=\"Columns within CSV File\", font=\"-weight bold\", padx = 15)\ncsv_columns_label.grid(row = 5, column = 0, columnspan = 2, sticky='nesw', pady = (5,0))\n\n# Create label to instruct users about how to select CSV headers\ncsv_columns_label = tk.Label(builder_win, text=\"Select/deselect the CSV column names in the list below to include/exclude them in the new CSV file. Click the Generate Report button below to create the new CSV file with the selected data columns.\", wraplength = 200, padx = 15)\ncsv_columns_label.grid(row = 6, column = 0, columnspan = 2, sticky='nesw', pady = (0, 10))\n\n# Establish Listbox to allow user to select desired CSV columns for output report\ncsv_col_select_listbox = Listbox(builder_win, selectmode = MULTIPLE, exportselection = 0)\ncsv_col_select_listbox.grid(row = 7, column = 0, sticky='nesw', padx = (25,0), pady = (0,0))\n\n# Establish Scroll Bar to allow user to Scroll Through List of CSV Column Headers\ncsv_col_select_scrollbar = Scrollbar(builder_win)\ncsv_col_select_scrollbar.grid(row = 7, column = 1, sticky='ns', padx = (0,10))\n\n# Link Scroll Bar to List Box for CSV Columns\ncsv_col_select_listbox.config(yscrollcommand = csv_col_select_scrollbar.set)\ncsv_col_select_scrollbar.config(command = csv_col_select_listbox.yview)\n\n# Create label for generating report section\ngenerate_report_label = tk.Label(builder_win, text = \"4. 
Generate New CSV\", bg=\"#5a5a5a\", fg=\"#ffffff\", font=\"-weight bold\", padx = 15)\ngenerate_report_label.grid(row = 8, column = 0, columnspan=2, sticky='nesw', pady = (15,0))\n\n# Create a button to generate the report\ngenerate_report_button = tk.Button(builder_win, text = \"Generate Report\", command = create_report, state = \"disabled\")\ngenerate_report_button.grid(row = 9, column = 0, columnspan = 2, sticky='nesw', padx = 15)\n\n# Create a button to close the program\nclose_program_button = tk.Button(builder_win, text = \"Close Program\", command = quit, font=\"-weight bold\")\nclose_program_button.grid(row = 10, column = 0, columnspan = 2, sticky='nesw', pady = 7.5, padx = 15)","sub_path":"csv_report_builder.py","file_name":"csv_report_builder.py","file_ext":"py","file_size_in_byte":7548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"371084464","text":"from Core.Parser import XML_parser\nfrom Core.DAO import DAO_DataFrame\nimport os\n\n\nclass XmlParser:\n\n    @staticmethod\n    def api_dict(pathToConfig, log, opts=None):\n        try:\n            dictionary = XML_parser.do_XML_parse(pathToConfig, log, opts)\n        except Exception as e:\n            log.raiseError(1, e)\n        return dictionary\n\n    def __init__(self, pathToConfig, log, opts=None):\n        try:\n            self.dictionary = XML_parser.do_XML_parse(pathToConfig, log, opts)\n        except Exception as e:\n            log.raiseError(1, e)\n\n        pathToExcel = os.path.join(os.path.join(os.getcwd(), 'Source'), self.dictionary[\"importXml_path_value\"])\n        if self.dictionary['checkMode_value'] == 'true':\n            pathToExcel_link = os.path.join(os.path.join(os.getcwd(), 'Source'), self.dictionary[\"pathToLinkFile\"])\n\n        arrOfColTypesInExcel = {}\n\n        if self.dictionary[\"dictMode\"] == 'false':\n            for prop in self.dictionary[\"excelColumns\"]:\n                arrOfColTypesInExcel[prop['colName']] = prop['colType']\n        elif self.dictionary[\"dictMode\"] == 'true':\n            for prop in self.dictionary[\"excelColumns\"]:\n                arrOfColTypesInExcel[prop['colName']] = prop['colType']\n            for table in self.dictionary['withDict']:\n                for prop in table['arrOfDictColumns']:\n                    arrOfColTypesInExcel[prop['colName']] = prop['colType']\n        try:\n            dao = DAO_DataFrame.ExcelSelect(pathToExcel, self.dictionary[\"sheetNumber_value\"], log, arrOfColTypesInExcel)\n            self.dataFrame = dao.newDf\n            self.df = dao.df\n\n            log.raiseInfo(1, self.dictionary[\"importXml_path_value\"], dao.sheet_name, self.dictionary[\"sheetNumber_value\"] + 1)\n        except Exception as e:\n            log.raiseError(16, self.dictionary[\"importXml_path_value\"], int(self.dictionary[\"sheetNumber_value\"]) + 1, e.args[0])\n\n        if self.dictionary['checkMode_value'] == 'true':\n            # collect the property types and column names for the linked checkMode table\n            arrOfColTypesInExcelLinked = {}\n            for prop in self.dictionary[\"linkedColumns\"]:\n                try:\n                    arrOfColTypesInExcelLinked[prop['linkedColName']] = \\\n                        list(filter(lambda x: x['colName'] == prop['colNameInSource'], self.dictionary['excelColumns']))[0]['colType']\n                except:\n                    log.raiseError(17)\n\n        try:\n            if self.dictionary['checkMode_value'] == 'true':\n                dao_link = DAO_DataFrame.ExcelSelect(pathToExcel_link,\n                                                     self.dictionary[\"linkedFileSheetNumber\"],\n                                                     log,\n                                                     arrOfColTypesInExcelLinked)\n                self.dataFrame_link = dao_link.newDf\n                self.df_link = dao_link.df\n        except Exception as e:\n            log.raiseError(16,\n                           self.dictionary[\"pathToLinkFile\"],\n                           int(self.dictionary[\"linkedFileSheetNumber\"]) + 1,\n                           
e.args[0])\n","sub_path":"GUI/Core/DAO/XML_DAO.py","file_name":"XML_DAO.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"361345300","text":"\n\nimport os\nimport pickle\n\ndetails_list=[]\nl2=[]\nG = []\ndef file_save():\n NAME_PRO = details_list[0]\n ADDRESS_PRO = details_list[1]\n MOBILE_NO_PRO = details_list[2]\n ROOM_NO_PRO = details_list[3]\n PRICE_PRO = details_list[4]\n f = open(\"hotel.dat\", \"ab\")\n a=save(NAME_PRO,ADDRESS_PRO,MOBILE_NO_PRO,ROOM_NO_PRO,PRICE_PRO)\n pickle.dump(a,f,protocol=2)\n f.close()\n restart_program()\n\n\ndef restart_program():\n \"\"\"Restarts the current program.\n Note: this function does not return. Any cleanup action (like\n saving data) must be done before calling this function.\"\"\"\n python = sys.executable\n os.execl(python, python, * sys.argv)\n\n\n\n\n\n\nclass save:\n def __init__(self, NAME_PRO, ADDRESS_PRO, MOBILE_NO_PRO, ROOM_NO_PRO, PRICE_PRO): \n self.name=NAME_PRO\n self.address=ADDRESS_PRO\n self.mobile_no=MOBILE_NO_PRO\n self.room_no=ROOM_NO_PRO\n self.price=PRICE_PRO\n print(self.name,self.address,self.mobile_no,self.room_no,self.price)\n\n def check_room(): #polymorpohism\n pass\n\n\n\nimport sys\n\ntry:\n from Tkinter import *\nexcept ImportError:\n from tkinter import *\n\ntry:\n import ttk\n py3 = False\nexcept ImportError:\n import tkinter.ttk as ttk\n py3 = True\n\n\n\nclass New_Toplevel(save): #inheritance\n\n def __init__(self):\n def check_room():\n self.rom = str(self.data.get())\n print(self.rom)\n print(\"\\n\")\n if self.rom.isdigit() == True and len(self.rom) != 0:\n self.Text1.insert(INSERT, \" valid room number \"\"\\n\")\n v = int(self.rom)\n f = open(\"hotel.dat\", \"rb\")\n f1 = open(\"hote.dat\", \"ab\")\n n = 0\n try:\n while True:\n s = pickle.load(f)\n if s.room_no == v:\n n = 1\n name1 = s.name\n\n print(\" \")\n else:\n pickle.dump(s, f1)\n except EOFError:\n if n == 0:\n self.Text1.insert(INSERT, \"NO GUEST FOUND\"\"\\n\")\n\n elif n == 1:\n\n self.Text1.insert(INSERT, \"THANK YOU \" + name1.upper() + \" FOR VISTING US\"\"\\n\")\n pass\n f.close()\n f1.close()\n os.remove(\"hotel.dat\")\n os.rename(\"hote.dat\", \"hotel.dat\")\n\n else:\n self.Text1.insert(INSERT, \"invalid input please input a valid ROOM NO.\"\"\\n\")\n\n root = Tk()\n '''This class configures and populates the toplevel window.\n top is the toplevel containing window.'''\n _bgcolor = '#ffffff' # X11 color: 'white'\n _fgcolor = '#000000' # X11 color: 'black'\n _compcolor = '#ffffff' # X11 color: 'white'\n _ana1color = '#ffffff' # X11 color: 'white'\n _ana2color = '#ffffff' # X11 color: 'white'\n font10 = \"-family {Courier New} -size 10 -weight normal -slant\" \\\n \" roman -underline 0 -overstrike 0\"\n font11 = \"-family {Segoe UI} -size 23 -weight bold -slant \" \\\n \"roman -underline 0 -overstrike 0\"\n font12 = \"-family {Segoe UI} -size 24 -weight bold -slant \" \\\n \"roman -underline 0 -overstrike 0\"\n font9 = \"-family {Segoe UI} -size 9 -weight normal -slant \" \\\n \"roman -underline 0 -overstrike 0\"\n\n root.geometry(\"1011x750\")\n root.title(\"JAF HOTELS\")\n root.configure(background=\"#ffffff\")\n root.configure(highlightbackground=\"#ffffff\")\n root.configure(highlightcolor=\"black\")\n background_image=PhotoImage(file='10.png')\n background_label=Label(root, image=background_image)\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\n\n\n\n self.Frame1 = Frame(root)\n self.Frame1.place(relx=0.04, rely=0.04, 
relheight=0.91, relwidth=0.91)\n self.Frame1.configure(relief=GROOVE)\n self.Frame1.configure(borderwidth=\"2\")\n self.Frame1.configure(relief=GROOVE)\n self.Frame1.configure(background=\"cornsilk\")\n self.Frame1.configure(highlightbackground=\"#ffffff\")\n self.Frame1.configure(highlightcolor=\"black\")\n self.Frame1.configure(width=925)\n## photo=PhotoImage(file=\"KK.gif\")\n## self.Frame1.configure(image=photo, compound=CENTER)\n\n self.Label1 = Label(self.Frame1)\n self.Label1.place(relx=0.14, rely=0.12, height=46, width=442)\n self.Label1.configure(activebackground=\"#ffffff\")\n self.Label1.configure(activeforeground=\"white\")\n self.Label1.configure(background=\"white\")\n self.Label1.configure(disabledforeground=\"#bfbfbf\")\n self.Label1.configure(font=font11)\n self.Label1.configure(foreground=\"black\")\n self.Label1.configure(highlightbackground=\"#ffffff\")\n self.Label1.configure(highlightcolor=\"black\")\n self.Label1.configure(text='''ENTER THE ROOM NO. :''')\n\n self.Entry1 = Entry(self.Frame1)\n self.data=StringVar()\n self.Entry1.place(relx=0.67, rely=0.12,height=44, relwidth=0.07)\n self.Entry1.configure(background=\"white\")\n self.Entry1.configure(disabledforeground=\"#bfbfbf\")\n self.Entry1.configure(font=font10)\n self.Entry1.configure(foreground=\"black\")\n self.Entry1.configure(highlightbackground=\"#ffffff\")\n self.Entry1.configure(highlightcolor=\"black\")\n self.Entry1.configure(insertbackground=\"black\")\n self.Entry1.configure(selectbackground=\"#e6e6e6\")\n self.Entry1.configure(selectforeground=\"black\")\n self.Entry1.configure(textvariable=self.data)\n\n\n\n self.Text1 = Text(self.Frame1)\n self.Text1.place(relx=0.05, rely=0.54, relheight=0.4, relwidth=0.89)\n self.Text1.configure(background=\"white\")\n self.Text1.configure(font=font9)\n self.Text1.configure(foreground=\"black\")\n self.Text1.configure(highlightbackground=\"#ffffff\")\n self.Text1.configure(highlightcolor=\"black\")\n self.Text1.configure(insertbackground=\"black\")\n self.Text1.configure(selectbackground=\"#e6e6e6\")\n self.Text1.configure(selectforeground=\"black\")\n self.Text1.configure(width=824)\n self.Text1.configure(wrap=WORD)\n\n self.Button1 = Button(self.Frame1)\n self.Button1.place(relx=0.34, rely=0.28, height=93, width=286)\n self.Button1.configure(activebackground=\"#ffffff\")\n self.Button1.configure(activeforeground=\"#000000\")\n self.Button1.configure(background=\"white\")\n self.Button1.configure(disabledforeground=\"#bfbfbf\")\n self.Button1.configure(font=font12)\n self.Button1.configure(foreground=\"black\")\n self.Button1.configure(highlightbackground=\"purple\")\n self.Button1.configure(highlightcolor=\"pink\")\n self.Button1.configure(pady=\"0\")\n \n self.Button1.configure(text='''CHECK OUT''')\n \n \n self.Button1.configure(command=check_room)\n root.mainloop()\nfrom __main__ import *\nimport sys\n\ntry:\n from Tkinter import *\nexcept ImportError:\n from tkinter import *\n\ntry:\n import ttk\n py3 = False\nexcept ImportError:\n import tkinter.ttk as ttk\n py3 = True\n\nfo1=open(\"recipt.txt\",\"r\")\nlist1=fo1.readlines()\n\ndel list1[1]\ndel list1[2]\ndel list1[3]\ndel list1[4]\ndel list1[5]\nlist1[0]=list1[0][:-1]\nlist1[1]=list1[1][:-1]\nlist1[2]=list1[2][:-1]\nlist1[3]=list1[3][:-1]\nlist1[4]=list1[4][:-1]\n\np='''\n@@@@@ JAF HOTELS @@@@@\n\n@@@@@ DHA PHASE V, KARACHI @@@@@\n\n@@@@@ SERVING GUESTS SINCE @@@@@\n\nTHANK YOU FOR BEING HERE!!\n\n@@@@ ###1950### @@@@\n\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\n\nNAME-%s\nADDRESS-%s\nMOBILE 
NO.-%s\nYOUR TOTAL BILL IS Rs.-%s\nYOUR ROOM NUMBER IS %s\n'''%(list1[0],list1[1],list1[2],list1[4],list1[3])\n\n \n\n\n\n\n\nclass receipt:\n def __init__(self):\n root=Tk()\n '''This class configures and populates the toplevel window.\n top is the toplevel containing window.'''\n _bgcolor = '#d9d9d9' # X11 color: 'gray85'\n _fgcolor = '#000000' # X11 color: 'black'\n _compcolor = '#d9d9d9' # X11 color: 'gray85'\n _ana1color = '#d9d9d9' # X11 color: 'gray85'\n _ana2color = '#d9d9d9' # X11 color: 'gray85'\n\n root.geometry(\"800x800\")\n root.title(\"RECEIPT\")\n root.configure(background=\"cornsilk\")\n \n\n self.Label1 = Label(root)\n self.Label1.configure(background=\"cornsilk\", font=('Times', '16','bold'))\n self.Label1.place(relx=0, rely=0, height=800, width=800)\n self.Label1.configure(disabledforeground=\"#a3a3a3\")\n self.Label1.configure(foreground=\"black\")\n self.Label1.configure(text=p)\n self.Label1.configure(anchor=N)\n\n self.Label1.configure(wraplength=500)\n self.Label1.configure(justify =CENTER)\n\n self.Label1.configure(width=582)\n root.mainloop()\n\n\n\n\n\nif __name__ == '__main__':\n out=New_Toplevel()\n\nif __name__ == '__main__':\n receipt1=receipt()\n","sub_path":"checkoutgui.py","file_name":"checkoutgui.py","file_ext":"py","file_size_in_byte":9101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"316314666","text":"#!/usr/bin/python\n\nimport readline\n\nwhile True:\n try:\n cmd = raw_input(\"==> \") \n except KeyboardInterrupt as e:\n print(\"Keyboard Interrupt\")\n break\n print(\"Going to run {}\".format(cmd))\n if cmd == 'exit' or cmd == 'quit':\n print(\"Bye.\")\n break\n\n\n","sub_path":"tests/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"525551531","text":"from sardana.macroserver.macro import Macro, Type\nimport taurus\n\n\n# self.info() = output in blue \n# self.error() = output in red\n# self.output() = output in black\n\n\nclass oxfcryo(Macro):\n '''\n Prints the variable status of the Oxford cryostream \n '''\n def run(self):\n oxfcryo = taurus.Device('bl13/eh/oxfcryo')\n self.info('########## BL13/EH/OXFCRYO ##########'+\"\\n\")\n varsvar = ['Alarm','ControllerNr','EvapAdjust','EvapHeat','EvapTemp','GasError','GasFlow','GasHeat', 'GasSetPoint', 'GasTemp', 'LinePressure', 'Phase', 'RampRate', 'RunMode', 'RunTime', 'SoftwareVersion', 'State', 'Status', 'SuctHeat', 'SuctTemp', 'TargetTemp']\n for var in varsvar:\n try:\n line=var+\"=\"+str(oxfcryo[var].value)\n except:\n line=var+\"=NAN\"\n self.info(line) \n\n\n\n\n\n\n\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ALBA_BL13_XALOC_USER_MACROS/oxfcryo_lib.py","file_name":"oxfcryo_lib.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"73658694","text":"import os\nimport re\nimport chardet\nimport time\n\ndef get_chardet(filename):\n data=open(filename,'rb').read()\n coding=chardet.detect(data)\n return coding['encoding']\n\ndef wordlabel(filename,colors):\n encoding=get_chardet('data/'+filename)\n if encoding=='GB2312':\n encoding='GBK'\n text=open('data/'+filename,'r',encoding=encoding).read()\n html='\\r\\n\\r\\n\\r\\n\\r\\n{title}\\r\\n\\r\\n{body}\\r\\n\\r\\n'\n body=''\n textline='
<p>{line}</p>
'\r\n    wordcolor='<font color=\"{color}\">{word}</font>'\r\n    points=['.',',','?','!',\"'\",'《','》',':',';','\"','\\r\\n','\\n']\r\n    for line in open('data/'+filename,'r',encoding=encoding):\r\n        body+=textline.format(line=line)+'\\r\\n'\r\n    for key in colors:\r\n        body=body.replace(key,wordcolor.format(color=colors[key],word=key))\r\n    return html.format(body=body,title=filename.replace('.txt',''))\r\n\r\ndef loadcolor():\r\n    encoding=get_chardet('settings/color')\r\n    if encoding=='GB2312':\r\n        encoding='GBK'\r\n    colors={}\r\n    for line in open('settings/color','r',encoding=encoding):\r\n        line=line.replace('\\r','').replace('\\n','').replace(' ','')\r\n        try:\r\n            colors[line.split('-')[0]]=line.split('-')[-1]\r\n        except:\r\n            continue\r\n    return colors\r\n\r\ndef main():\r\n    colors=loadcolor()\r\n    for filename in os.listdir('data'):\r\n        if filename.endswith('txt'):\r\n            try:\r\n                html=wordlabel(filename,colors)\r\n            except:\r\n                print(filename,'failed')\r\n                continue\r\n            f=open('result/%s.html'%(filename.replace('.txt','')),'w',encoding='utf-8')\r\n            f.write(html)\r\n            f.close()\r\n            print(filename,'ok')\r\n    time.sleep(50)\nmain()\n","sub_path":"labelnovel/labelnovel.py","file_name":"labelnovel.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"455818197","text":"# -*- coding: utf-8 -*-\n# Run the tests with \"pytest -v -k test_bowling_api.py\" from the current unit directory\n\nfrom bowling import *\nimport pytest\n\n\nGAME_BAD_RESULT_LIST = [\n    '9999',\n    'XYI',\n    '-0-0',\n    ]\n\n\ndef test_get_score_function_bad0_result():\n    with pytest.raises(ErrorSumFrame) as exc_info:\n        get_score(game_result=GAME_BAD_RESULT_LIST[0])\n    assert \"Некорректное\" in str(exc_info.value)\n\n\ndef test_get_score_function_bad1_result():\n    with pytest.raises(ErrorInputData) as exc_info:\n        get_score(game_result=GAME_BAD_RESULT_LIST[1])\n    assert \"Недопустимое\" in str(exc_info.value)\n\n\ndef test_get_score_function_bad2_result():\n    with pytest.raises(ErrorInputData) as exc_info:\n        get_score(game_result=GAME_BAD_RESULT_LIST[2])\n    assert \"Недопустимое\" in str(exc_info.value)\n","sub_path":"lesson_014/tests/unit/test_bowling_api_bad_result.py","file_name":"test_bowling_api_bad_result.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"416032014","text":"# Helper classes\nclass Node:\n    def __init__(self, data):\n        self.data=data\n        self.next=None\n\n\nclass LinkedList:\n\n    def __init__(self):\n        # initialization here\n        self.head=None\n\n    # adds a new node with the given value to the end of the list\n    def insert (self, value):\n        new_node = Node(value)\n        if self.head is None :\n            self.head = new_node\n            self.first_node = new_node\n            return 0\n\n        current_node =self.head\n        while current_node.next :\n            current_node=current_node.next\n        current_node.next=new_node\n\n    def includes (self, key):\n        current_node =self.head\n\n        while current_node:\n            if key == current_node.data:\n                return True\n            current_node=current_node.next\n        return False\n\n    def __str__ (self):\n        current_node =self.head\n        text =''\n        while current_node:\n            text = text + '{ ' + current_node.data + ' } -> '\n            current_node=current_node.next\n        text = text + ' -> NULL'\n        return text\n\n# &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n\n\nclass Hashtable :\n    def __init__(self, size =1024):\n        self.size = size\n        self.buckets = []\n        for i in range(size):\n            self.buckets.append(None)\n\n    def hash(self,key):\n        # The same key should always produce the same hash code.\n        sum = 
0\n        for char in key :\n            sum+=ord(char)\n        sum = (sum*23) % self.size\n        return sum\n\n    def add(self,value,key):\n        hashed_key = self.hash(key)\n        if not self.buckets[hashed_key]:\n            self.buckets[hashed_key] = LinkedList()\n        self.buckets[hashed_key].insert([key,value])\n        return True\n\n    def get(self,key):\n        hashed_key = self.hash(key)\n        node = self.buckets[hashed_key]\n        if not node :\n            return None\n        node = self.buckets[hashed_key].head\n        while node:\n            if key == node.data[0] :\n                return node.data[1]\n            node = node.next\n\n    def contains(self,key):\n        # Walk the bucket's chain so a hash collision can't report a false positive\n        hashed_key = self.hash(key)\n        if not self.buckets[hashed_key]:\n            return False\n        node = self.buckets[hashed_key].head\n        while node:\n            if key == node.data[0]:\n                return True\n            node = node.next\n        return False\n\n    def __str__(self):\n        return str(self.buckets)\n\n\n\n\n\nif __name__ == \"__main__\":\n    hashtable = Hashtable()\n    # hashtable.add('vale_1','key')\n    print(hashtable.hash('za'))\n\n","sub_path":"python/repeated_word/hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"222279336","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom fermulerpy.constants import accuracy\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.utils import shuffle\n\n\nclass dnn:\n    def __init__ (self):\n        self.d = None\n        self.L = None\n        self.J = None\n        self.K = None\n        self.prev_error = 0.0\n    \n    def sigmoid(self,t):\n        return 1/(1 + np.exp(-1*t))\n    def derivative(self,t):\n        return t*(1-t)\n    def predict(self,X):\n        \n        lp = []\n        tX = X\n        for n in range(len(tX)):\n            x = []\n            x.append(1)\n            for p in tX[n]:\n                x.append(p)\n            x = np.array(x)\n            a=[]\n            s=[]\n            for l in range(0,int(self.L)):\n                a.append(np.dot(self.Wl[l],x))\n                s.append(self.sigmoid(np.dot(self.Wl[l],x)))\n            s.insert(0,1)\n            a1=[]\n            s1=[]\n            for j in range(0,int(self.J)):\n                a1.append(np.dot(self.Wh[j],s))\n                s1.append(self.sigmoid(np.dot(self.Wh[j],s)))\n            s1.insert(0,1)\n            ao=[]\n            so=[]\n            \n            \n            for k in range(0,self.K):\n                ao.append(np.dot(self.Wk[k],s1))\n                so.append(self.sigmoid(np.dot(self.Wk[k],s1)))\n\n            lp.append(np.argmax(so) +1)\n        return lp\n    def train(self,architecture,X,Y,tt,epochs=30,lr=0.1):\n        self.d = architecture[0]\n        self.L = architecture[1]\n        self.J = architecture[2]\n        self.K = architecture[3]\n        np.random.seed(10)\n        self.Wl = np.random.randn(self.L,self.d+1) \n        self.Wh = np.random.randn(self.J,self.L + 1) \n        self.Wk = np.random.randn(self.K,self.J + 1) \n        mse = {}\n        l = []\n        for epoch in range(epochs):\n            lr = ((0.9/epochs)*(epoch)) + 0.1\n            Ex = 0\n            tX = X\n            tY = Y\n            #tX = shuffle(X,random_state = epoch)\n            #tY = shuffle(Y,random_state = epoch) \n            for n in range(len(tX)):\n                \n                x = []\n                x.append(1)\n                for p in tX[n]:\n                    x.append(p)\n                x = np.array(x)\n                a=[]\n                s=[]\n                for l in range(0,int(self.L)):\n                    a.append(np.dot(self.Wl[l],x))\n                    s.append(self.sigmoid(np.dot(self.Wl[l],x)))\n                s.insert(0,1)\n                a1=[]\n                s1=[]\n                for j in range(0,int(self.J)):\n                    a1.append(np.dot(self.Wh[j],s))\n                    s1.append(self.sigmoid(np.dot(self.Wh[j],s)))\n                s1.insert(0,1)\n                ao=[]\n                so=[]\n                \n                \n                for k in range(0,self.K):\n                    ao.append(np.dot(self.Wk[k],s1))\n                    so.append(self.sigmoid(np.dot(self.Wk[k],s1)))\n                    \n                    Ex = Ex + 0.5*(tY[n][k] - so[k])*(tY[n][k] - so[k])\n                    \n\n                #print(np.argmax(Y[n]),np.argmax(so))\n                #print(Ex)\n                #Backpropagation\n                #Weight Updation for hidden to output layer\n                delta_xk = []\n                for k in range(self.K):\n                    temp = (tY[n][k] - so[k])*(self.derivative(so[k]))\n                    delta_xk.append(temp)\n                for j in range(0,self.J + 1):\n                    for k in range(self.K):\n                        \n                        self.Wk[k][j] = self.Wk[k][j] + lr*delta_xk[k]*s1[j]\n                \n                
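# Backprop chain rule: each hidden-layer delta sums delta_xk[k]*Wk[k][j] over the output units, scaled by the sigmoid derivative of the hidden activation; every weight then moves by lr * delta * its input activation.\n                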
#Weight Updation for hidden to hidden layer\n delta_xj = []\n for j in range(self.J):\n sigma_term = 0\n for k in range(self.K):\n sigma_term = sigma_term + delta_xk[k]*(self.Wk[k][j])\n for l in range(self.L + 1):\n temp = sigma_term*(self.derivative(s1[j+1]))\n delta_xj.append(temp)\n self.Wh[j][l] = self.Wh[j][l] + lr*temp*s[l]\n \n for l in range(self.L):\n sigma_term = 0\n for j in range(self.J):\n sigma_term = sigma_term + delta_xj[j]*(self.Wh[j][l])\n for i in range(self.d + 1):\n temp = sigma_term*(self.derivative(s[l+1]))\n self.Wl[l][i] = self.Wl[l][i] + lr*temp*x[i]\n \n #print(Ex)\n curr_error = Ex/(len(X))\n mse[epoch] = Ex/len(X)\n print(epoch,mse[epoch])\n if(epoch == 0):\n self.prev_error = curr_error\n else:\n if(epoch == epochs-1 ):\n \n break\n else:\n self.prev_error = curr_error\n \n \n plt.plot(mse.values())\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"MSE\")\n #plt.ylim([0, 1])\n plt.xticks(np.arange(0,epoch,2))\n plt.show()\n \ntrain_x = np.loadtxt('x_train.txt')\nval_x = np.loadtxt('x_val.txt')\ntest_x = np.loadtxt('x_test.txt')\ntrain_y=[]\nval_y = []\ntest_y = []\n\nfor i in range(40):\n train_y.append(1)\nfor i in range(40):\n train_y.append(2)\nfor i in range(40):\n train_y.append(3)\nfor i in range(10):\n val_y.append(1)\nfor i in range(10):\n val_y.append(2)\nfor i in range(10):\n val_y.append(3)\nfor i in range(50):\n test_y.append(1)\nfor i in range(50):\n test_y.append(2)\nfor i in range(50):\n test_y.append(3)\ntrain_y = np.array(train_y)\nval_y = np.array(val_y)\ntest_y = np.array(test_y)\n\nt1 = []\nfor i in train_y:\n if(i==1):\n t1.append([1,0,0])\n elif(i==2):\n t1.append([0,1,0])\n else:\n t1.append([0,0,1])\n\nt1 = np.array(t1)\n\n\ndnn_model = dnn()\narchitecture = [32,40,50,3]\ndnn_model.train(architecture,train_x,t1,train_y)\nprint(\"accuracy score on train data : \",accuracy(train_y,train_x,architecture))\nprint(\"accuracy score on val data : \",accuracy(val_y,val_x,architecture))\nprint(\"accuracy score on test data : \",accuracy(test_y,test_x,architecture))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Group10_Assignment1/Classification/MLFNN/Task_bovw.py","file_name":"Task_bovw.py","file_ext":"py","file_size_in_byte":6271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"61015523","text":"'''\nFrom the Codecademy Python track\n\nDefine a function called product that takes a list of integers as input \nand returns the product of all of the elements in the list.\nFor example: product([4, 5, 5]) should return 100 (because 4 * 5 * 5 is 100).\nDon't worry about the list being empty. 
Your function should return an integer.\n'''\n\ndef product(integers):\n    result = 1\n    for i in range(len(integers)):\n        result *= integers[i]\n    return result","sub_path":"codecademy.product.py","file_name":"codecademy.product.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"518547467","text":"from rest_framework.authentication import TokenAuthentication\nfrom rest_framework.exceptions import AuthenticationFailed\nfrom datetime import timedelta\nfrom django.utils import timezone\nfrom django.conf import settings\n\nclass ExpiringTokenAuthentication(TokenAuthentication):\n    expired = False\n    def expires_in(self, token):  # computes how long until the token expires\n        time_elapsed = timezone.now() - token.created  # time between the token's creation and now,\n        # i.e. how long the token has been alive\n        left_time = timedelta(seconds=settings.TOKEN_EXPIRED_AFTER_SECONDS) - time_elapsed  # take the lifetime we\n        # configured in settings and subtract the time that has already elapsed\n        return left_time  # return the lifetime the token has left\n\n    def is_token_expired(self, token):  # checks whether the elapsed time has already expired the token\n        return self.expires_in(token) < timedelta(seconds=0)\n\n    def token_expire_handler(self, token):\n        is_expire = self.is_token_expired(token)  # checks whether the token has expired, using the helpers above\n        if is_expire:\n            user = token.user\n            token.delete()\n            token = self.get_model().objects.create(user=user)  # issue the user a fresh token\n            self.expired = True\n        return is_expire, token\n\n    def authenticate_credentials(self, key):\n        message, token, user = None, None, None\n        try:\n            token = self.get_model().objects.select_related('user').get(key=key)  # fetch the token record from the database\n            user = token.user\n        except self.get_model().DoesNotExist:  # if the token is not among the stored tokens, it is invalid\n            message = 'Token inválido'\n            self.expired = True\n        if token is not None:\n            if not token.user.is_active:\n                message = 'Usuario no activo o eliminado'\n            is_expired, token = self.token_expire_handler(token)  # unpack the (expired, token) pair; the bare tuple is always truthy\n            if is_expired:\n                message = 'Su Token ha Expirado'\n        return (user, token, message, self.expired)\n","sub_path":"applications/users/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"133922230","text":"from PIL import Image\nimport gdal\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\n\ndef resize_raw(file):\n    output = file[:-4] + '_tn.tif'\n    im = Image.open(file)\n    im = im.convert(\"RGBX\")\n    im.thumbnail((500, 500))\n    im.save(output)\n    return output\n\n\ndef colormap():\n    return mpl.colors.LinearSegmentedColormap.from_list('cmap', ['#FFFFFF','#bde5ef','#c2c7c8','#287ab9','#08306b'])\n\ndef resize_tif_result(file):\n    output = file[:-4] + '_tn.tif'\n    src_ds = gdal.Open(file, gdal.GA_ReadOnly)\n    classified_image_data = src_ds.ReadAsArray()\n    plt.figure(figsize=(8, 8))\n    plt.axis('off')\n    # imgplot = plt.imshow(classified_image_data, colormap())\n    plt.imsave(output, classified_image_data, cmap = colormap())\n    return output\n\n\npath = 'C:/Users/dsha/Documents/workspace/arcci_image/arcci-backend/arcciNew/static/media/uploads/'\ntif_file = path + 
'DMS_1842638_01372_20180406_15080202.tif'\nresult_file = path + 'classified/DMS_1842638_01372_20180406_15080202_classified.tif'\nprint(resize_raw(tif_file))","sub_path":"resize_tif.py","file_name":"resize_tif.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"308925419","text":"import pickle as pkl\n\nimport numpy as np\nfrom numpy.linalg import pinv\n\n\nclass MovieOptimizer:\n def __init__(self):\n self.list_genre = {'Action': 1, 'Adventure': 2, 'Animation': 3, 'Biography': 4, 'Comedy': 5, 'Crime': 6,\n 'Documentary': 7, 'Drama': 8, 'Family': 9, 'Fantasy': 10, 'Film-Noir': 11, 'History': 12,\n 'Horror': 13, 'Music': 14, 'Musical': 15, 'Mystery': 16, 'Romance': 17, 'Sci-Fi': 18,\n 'Sport': 19, 'Thriller': 20, 'War': 21, 'Western': 22}\n self.initialize()\n\n @staticmethod\n def initialize():\n try:\n pkl.load(open('.theta.vec', 'rb'))\n except IOError:\n theta = np.zeros((25, 1))\n pkl.dump(theta, open('.theta.vec', 'wb'))\n try:\n pkl.load(open('.movie_score.dict', 'rb'))\n except IOError:\n rated = {}\n pkl.dump(rated, open('.movie_score.dict', 'wb'))\n\n def make_feature(self, name_dict):\n feat = np.zeros(25)\n feat[0] = 1\n genres = name_dict['Genre'].split(', ')\n for genre in genres:\n feat[self.list_genre.get(genre, 0)] = 1\n try:\n feat[23] = name_dict['imdbRating'] / 10.0\n except:\n feat[23] = 0\n try:\n feat[24] = name_dict['Metascore'] / 100.0\n except:\n feat[24] = 0\n return [feat, name_dict['Score']]\n\n def make_matrix(self):\n rated = pkl.load(open('.movie_score.dict', 'rb'))\n x_mat = []\n y_mat = []\n for name in rated:\n [feat, score] = self.make_feature(rated[name])\n x_mat.append(feat)\n y_mat.append(score)\n x_mat = np.array(x_mat)\n y_mat = np.array(y_mat).reshape(-1, 1)\n return [x_mat, y_mat]\n\n def predict(self, name_dict):\n [feat, score] = self.make_feature(name_dict)\n feat = feat.reshape(1, -1)\n theta = pkl.load(open('.theta.vec', 'rb'))\n return np.dot(feat, theta)\n\n def delete_update(self, id_dict):\n rated = pkl.load(open('.movie_score.dict', 'rb'))\n if id_dict['Title'] in rated:\n del rated[id_dict['Title']]\n pkl.dump(rated, open('.movie_score.dict', 'wb'))\n self.update()\n\n def add_update(self, id_dict):\n assert id_dict['Score'] is not None and id_dict['Score'] >= 0\n title = id_dict['Title']\n rated = pkl.load(open('.movie_score.dict', 'rb'))\n rated[title] = id_dict\n pkl.dump(rated, open('.movie_score.dict', 'wb'))\n self.update()\n\n def update(self):\n [x_mat, y_mat] = self.make_matrix()\n theta = np.dot(np.dot(pinv(np.dot(x_mat.T, x_mat)), x_mat.T), y_mat)\n pkl.dump(theta, open('.theta.vec', 'wb'))\n","sub_path":"MovieOptimizer.py","file_name":"MovieOptimizer.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"195449094","text":"\"\"\"\nlastVal = 0\n\nif target == curr:\n return curr.val\n \nif target < curr:\n curr = curr.left\n \nif target > curr:\n curr = curr.right\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def closestValue(self, root: TreeNode, target: float) -> int:\n closest_val = root.val\n dist = self.findDistance(closest_val, target)\n \n curr = root\n \n while curr:\n if target == curr.val:\n return curr.val\n \n elif target < curr.val:\n closest_val = min(closest_val, curr.val, 
key=lambda x:self.findDistance(x, target))\n curr = curr.left\n \n else:\n closest_val = min(closest_val, curr.val, key=lambda x:self.findDistance(x, target))\n curr = curr.right\n \n return closest_val\n \n def findDistance(self, currVal, target) -> int:\n return abs(currVal - target)\n \n ","sub_path":"closest-binary-search-tree-value/closest-binary-search-tree-value.py","file_name":"closest-binary-search-tree-value.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"571281849","text":"import psutil\nimport argparse\n\ndef view_mem(args):\n vm = psutil.virtual_memory()\n\n lst = ['Total', 'Available', 'Percent', 'Used', 'Free', 'Active', 'Inactive', 'Buffers', 'Cached', 'Shared']\n strform = '{:<10}{:3.3f}'\n \n for i in range(len(vm)):\n if i == 2:\n #print('{:<10}{:d}%'.format(lst[2], int(vm[2]))) # percentage\n continue\n if args == 'b':\n print(strform.format(lst[i], vm[i]))\n elif args == 'k':\n print(strform.format(lst[i], vm[i]/1024))\n elif args == 'm':\n print(strform.format(lst[i], vm[i]/1024**2))\n elif args == 'g':\n print(strform.format(lst[i], vm[i]/1024**3))\n elif args == 't':\n print(strform.format(lst[i], vm[i]/1024**4))\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-b', action='store_true', help='show output in bytes')\n parser.add_argument('-k', action='store_true', help='show output in kilobytes')\n parser.add_argument('-m', action='store_true', help='show output in megabytes')\n parser.add_argument('-g', action='store_true', help='show output in gigabytes')\n parser.add_argument('-t', action='store_true', help='show output in terabytes')\n parser.add_argument('-v', action='store_true', help='show version information')\n args = parser.parse_args()\n\n if args.b:\n view_mem('b')\n elif args.k:\n view_mem('k')\n elif args.m:\n view_mem('m')\n elif args.g:\n view_mem('g')\n elif args.t:\n view_mem('t')\n elif args.v:\n print(parser.prog + ' 1.0')\n print(\"Author: Barskov Sergey \")\n else:\n view_mem('m')\n\nif __name__ == '__main__':\n main()","sub_path":"mem.py","file_name":"mem.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"303296563","text":"import cx_Oracle\nimport getpass\nimport time\nimport sys\nimport os\nimport logging\nimport re\nfrom collections import OrderedDict\n\n#xSet = set()\n##def read():\n## #read file\n## path=\"C:\\\\Users\\\\jonesl9\\\\database\\\\dbfill.txt\"\n## file_object = open(path, \"r\")\n## lines = file_object.readlines()\n## file_object.close()\n##\n## #printing the lines\n## for x in lines:\n## x = x.strip()\n## xword = x.split()\n## for x1 in xword:\n## if x1.isalpha():\n## xSet.add(x1.lower())\n##\n## \n##read()\n\nip = '11.16.38.100'\nport = 1521\nSID = 'dblevi'\n\ndsn_tns = cx_Oracle.makedsn(ip, port, SID)\n\ndb = cx_Oracle.connect('scott','tiger',dsn_tns)\n\ncursor = db.cursor()\n\n\npath=\"C:\\\\Users\\\\jonesl9\\\\database\\\\dbfill.txt\"\nfile_object = open(path, \"r\")\nlines = file_object.readlines()\n##for item in range(10):\n## cursor.execute(\"insert into bonus(ename,job,sal,comm) values('test\"+str(item)+\"', 'man', 2342, 3453)\")\n##cursor.execute(\"commit\")\n\nfor x in lines:\n \n x = x.strip()\n t = 0\n p = 0\n xword = x.split()\n for x1 in xword:\n #print(x1)\n if ((t % 4) == 2 & p < 5):\n p = p+1\n if(xword[0] != 'name'):\n #print(\"insert into bonus(ename,job,sal,comm) values( '\"+ 
xword[0]+\"', '\"+xword[1]+\"', \"+xword[2]+\", \"+xword[3]+ \")\")\n cursor.execute(\"insert into bonus(ename,job,sal,comm) values( '\"+ xword[0]+\"', '\"+xword[1]+\"', \"+xword[2]+\", \"+xword[3]+ \")\")\n #cursor.execute('commit')\n t = t+1\ncursor.execute('commit')\n \n \n \n #cursor.execute(\"insert into bonus(ename,job,sal,comm) values( '\"+ xword[0]+\"', '\"+xword[1]+\"', \"+xword[2]+\", \"+xword[3]+ \")\")\n \n\n\n\n###test = cursor.execute('select * from emp order by 1')\n##\n##\n###print (test.fetchmany(2))\n##bill = \"'bill'\"\n##job = 'cleaner'\n##\n#cursor.execute(\"insert into bonus(ename,job,sal,comm) values('bill', 'cleaner', 123, 543 )\")\n###cursor.execute(\"commit\")\n##\n###delete entry\n##cursor.execute(\"delete from bonus where ename = 'bill'\")\n##cursor.execute('commit')\nfile_object.close()\n","sub_path":"pythonTools/dbconnection.py","file_name":"dbconnection.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"416811812","text":"import os\n\ndef get_ip(url):\n com = \"host \" + url\n ps = os.popen(com)\n results = str(ps.read())\n marker = results.find('has address') + 12\n return results[marker:].splitlines()[0]\n\nprint(get_ip('surepeople.com'))\n","sub_path":"ipf.py","file_name":"ipf.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"290619535","text":"#works just fine 1/25/11 (TIFF)\n# Linear trajectories\n# What happens when you change these values?\n# What is the meaning of each of these numbers?\n# Can you:\n## Make the panda charge at the camera\n## Make the panda start at the camera position and go away\n## Make the panda start on the left of the screen and move to the right\n## Make the panda go twice as fast\nfrom Panda import *\n\n# These define the initial values and rate for x, y, and x.\n\nx0 = 0\ndx = 1\ny0 = 0\ndy = 1\nz0 = 0\ndz = 1\n# This is called a \"parametric\" equation - you tell the function what\n# time it is and the function tells you where you are at that time.\ndef f(t):\n return P3(x0+dx*t, y0+dy*t, z0+dz*t)\n\nb = panda(position = f(time))\n\ncamera.position = P3(0, -10, 0)\n\nstart()","sub_path":"CompletedPandaHandouts/src/Panda Math/01-linearMotion.py.py","file_name":"01-linearMotion.py.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"392796444","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\n# Create your models here.\n\nclass Notice(models.Model):\n title = models.CharField(max_length=100)\n publish_date = models.DateTimeField('Date Published')\n expiry_date = models.DateTimeField('Expiry Date')\n content = models.ImageField(upload_to=\"Pictures\")\n misc_notes = models.CharField(max_length=1000)\n author = models.ForeignKey(User,related_name='author')\n\n def __unicode__(self):\n return self.title\n \n def is_expired(self):\n if timezone.now()>self.expiry_date:\n return True\n else:\n return False \n","sub_path":"NoticeBoard/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"609332182","text":"from src.command.command_handler import CommandHandler\nfrom tests.mocks.event_mock import EventMock\n\n\nclass SpecCommandHandler:\n\n def 
test_describes_interface(self):\n handler = self.given_default_command_handler()\n handler.public_command(None, None)\n handler.special_command(None, None)\n\n def test_event_support_commands(self):\n # test all the support commands for all command handler\n handler = self.given_default_command_handler()\n event = self.given_default_event()\n\n message = handler.get_full_message(event)\n assert message == '!buy 10'\n\n name = handler.get_twitch_name(event)\n assert name == 'antrazith'\n\n sub = handler.is_sub(event)\n assert sub is True\n\n @staticmethod\n def given_default_event():\n return EventMock()\n\n @staticmethod\n def given_default_command_handler():\n return CommandHandler(None, 'antrazith')\n","sub_path":"tests/command/command_handler_spec.py","file_name":"command_handler_spec.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"240493096","text":"import cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread(\"landscape.jpeg\")\r\n\r\nb, g, r = cv2.split(img)\r\n\r\nzeros = np.zeros(img.shape[:2], dtype=\"uint8\")\r\nprint(zeros)\r\n\r\ncv2.imshow(\"Original Image\", img)\r\ncv2.imshow(\"Red\", cv2.merge([zeros, zeros, r]))\r\ncv2.imshow(\"Green\", cv2.merge([zeros, g, zeros]))\r\ncv2.imshow(\"Blue\", cv2.merge([b, zeros, zeros]))\r\n\r\ncv2.waitKey(0)\r\n\r\ncv2.destroyAllWindows()\r\n","sub_path":"Applying_rb_filter.py","file_name":"Applying_rb_filter.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"606936457","text":"\"\"\"Script to pre-process the emotion data and perform data exploration\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport re\r\nimport collections\r\n\r\n# dictionary of text file names\r\ntext_files = {\"train text\": \"data/emotion/emotion_train.txt\",\r\n \"validation text\": \"data/emotion/emotion_validation.txt\",\r\n \"test text\": \"data/emotion/emotion_test.txt\"}\r\n\r\n# dictionary of label file names\r\nlabel_files = {\"train labels\": \"data/emotion/emotion_train_labels.txt\",\r\n \"validation labels\": \"data/emotion/emotion_validation_labels.txt\",\r\n \"test labels\": \"data/emotion/emotion_test_labels.txt\"}\r\n\r\n\r\n# function to open and do basic cleanse of text data\r\ndef preprocess(set_file):\r\n emotion_data = []\r\n with open(text_files[set_file], 'r', encoding='utf-8') as f:\r\n for line in f:\r\n line = line.replace(\"@user\", \"\")\r\n line = line.replace(\"#\", \"hashtag \")\r\n alphanumeric_filter = re.sub(r'[^A-Za-z0-9 ]+', '', line)\r\n new_line = \"\".join(alphanumeric_filter)\r\n emotion_data.append(new_line.replace(\"\\n\", \"\"))\r\n text_files[key] = emotion_data\r\n\r\n\r\n# function to open label data\r\ndef open_labels(key):\r\n emotion_labels = []\r\n with open(label_files[key], 'r') as f:\r\n for line in f:\r\n emotion_labels.append(line.replace(\"\\n\", \"\"))\r\n label_files[key] = emotion_labels\r\n\r\n\r\n# function to clean text one word at a time\r\ndef text_cleanse(string):\r\n # set values of items to be removed\r\n stop_words = ['the', 'a', 'and', 'is', 'be', 'will', \"to\", \"i\", \"of\", \"you\", \"in\", \"my\", \"that\", \"it\", \"for\", \"on\",\r\n \"me\", \"im\", \"with\", \"so\", \"this\", \"just\"]\r\n # convert string to lower case\r\n string = string.lower()\r\n # remove unnecessary words\r\n string = ' '.join([word for word in string.split() if word not in stop_words])\r\n return 
string\r\n\r\n\r\n# function to merge two lists to make tuple\r\ndef merge(list_1, list_2):\r\n merged_list = tuple(zip(list_1, list_2))\r\n return merged_list\r\n\r\n\r\n# loop over text files to open\r\nfor key in text_files:\r\n preprocess(key)\r\n\r\n# loop over label files to open\r\nfor key in label_files:\r\n open_labels(key)\r\n\r\n# create dictionary to store dataframes\r\ndataframes = {\"train\": None,\r\n \"validation\": None,\r\n \"test\": None}\r\n\r\nclean_raw_data = {\"train\": None,\r\n \"validation\": None,\r\n \"test\": None}\r\n\r\n# create iterable lists of dictionary keys\r\ntext_list = list(text_files)\r\nlabels_list = list(label_files)\r\ndataframes_list = list(dataframes)\r\n\r\n# loop over each data set to create dataframes and raw data for word frequency analysis\r\nfor i in range(3):\r\n dataset = merge(text_files[text_list[i]], label_files[labels_list[i]])\r\n X_train = [x[0] for x in dataset]\r\n y_train = [int(y[1]) for y in dataset]\r\n X_train = [text_cleanse(x) for x in X_train]\r\n clean_raw_data[dataframes_list[i]] = X_train\r\n dataframes[dataframes_list[i]] = pd.DataFrame({'Text': X_train, 'Target': y_train})\r\n\r\nfor i in dataframes.keys():\r\n file_name = 'data/emotion/{}_df.csv'.format(i)\r\n dataframes[i].to_csv(file_name, index=False)\r\n\r\n# print(dataframes[\"train\"].head())\r\n########################################################################################################################\r\n\"\"\"The following section conducts some data exploration\"\"\"\r\n\r\n# exploratory analysis of Target values\r\ncounts = pd.DataFrame(dataframes[\"train\"]['Target'].value_counts())\r\nprint(counts)\r\ncounts.plot(kind=\"bar\")\r\nplt.title(\"Emotion analysis classification distribution\")\r\nplt.xlabel(\"Class\")\r\nplt.ylabel(\"Count\")\r\nplt.show()\r\n\r\n# word counts\r\nwordcount = {}\r\n\r\nfor line in clean_raw_data[\"train\"]:\r\n for word in line.split():\r\n if word not in wordcount:\r\n wordcount[word] = 1\r\n else:\r\n wordcount[word] += 1\r\n\r\nn_print = int(input(\"How many most common words to print: \"))\r\nprint(\"The {} most common words are:\".format(n_print))\r\nword_counter = collections.Counter(wordcount)\r\nfor word, count in word_counter.most_common(n_print):\r\n print(word, \":\", count)\r\n\r\n# Create a data frame of the most common words\r\n# Draw a bar chart\r\nlst = word_counter.most_common(n_print)\r\ndf = pd.DataFrame(lst, columns=['Word', 'Count'])\r\ndf.plot.bar(x='Word', y='Count')\r\nplt.show()\r\n","sub_path":"preprocessing_scripts/emotion_analysis.py","file_name":"emotion_analysis.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"548932674","text":"'''\nAll Rights Reserved.\n\nCopyright (c) 2017-2019, Gyrfalcon technology Inc.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. 
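Regarding the emotion-analysis record above: building the frequency table token by token works, but the stop-word filter and the count collapse naturally into a generator fed to collections.Counter. A self-contained sketch with an abbreviated stop-word list and made-up sample lines:

import collections
import re

STOP_WORDS = {'the', 'a', 'and', 'is', 'to', 'i', 'of', 'you', 'in', 'my'}  # abbreviated

def cleanse(line):
    # keep alphanumerics and spaces, lower-case, then drop stop words
    line = re.sub(r'[^A-Za-z0-9 ]+', '', line).lower()
    return [w for w in line.split() if w not in STOP_WORDS]

tweets = ["I am so happy today!", "This is the worst day"]  # hypothetical sample input
counts = collections.Counter(w for t in tweets for w in cleanse(t))
print(counts.most_common(3))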
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES;LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\nUSE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n'''\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport gti.quantize as Q\nfrom gti.chip import spec\n\n\"\"\"Basic layers\nboolean tensors are uint8 before pytorch 1.2\"\"\"\nclass Conv2d(nn.Conv2d):\n \"\"\"A layer that implements 2D convolutions.\n Bias must always be present to appease the optimizer\n Bias is 0 initialized by default\n It can get a grad only if quant_params.fuse is True\n Consequently, it can only be not 0 if fuse is True\n\n Args:\n quant_params: GTI quantization parameters\n mask_bit: mask bitwidth for GTI quantized convolution\n *args/**kwargs - see nn.Conv2d\n \"\"\"\n def __init__(self, quant_params=None, mask_bit=None, *args, **kwargs):\n if quant_params.quant_w and (quant_params.chip is None or mask_bit is None):\n raise ValueError(\"Must specify chip and mask bit when quantizing.\")\n super(Conv2d, self).__init__(*args, **kwargs)\n self.register_buffer(\"quantize\", torch.tensor(quant_params.quant_w))\n self.chip = quant_params.chip #string so not easily saved; ASCII is dirty and messes up the dicts\n self.register_buffer(\"mask_bit\", torch.tensor(mask_bit))\n self.bias.requires_grad = quant_params.fuse\n\n def forward(self, x):\n if self.quantize:\n shift = Q.compute_shift(\n self.weight,\n self.bias if self.bias.requires_grad else None,\n self.chip,\n self.mask_bit.item()\n ).item()\n tmp_weight = Q.quantize_weight(\n self.weight,\n self.mask_bit,\n shift\n )\n if self.bias.requires_grad: #TODO(Yin): is this right???\n tmp_bias = Q.QuantizeShift.apply(self.bias, shift)\n else:\n tmp_bias = None\n return F.conv2d(\n x,\n tmp_weight,\n tmp_bias,\n self.stride,\n self.padding,\n self.dilation,\n self.groups\n )\n return F.conv2d(\n x,\n self.weight,\n self.bias,\n self.stride,\n self.padding,\n self.dilation,\n self.groups\n )\n\nclass ReLU(nn.ReLU):\n \"\"\"A layer that implements ReLU.\n\n Args:\n quantize: if True, quantizes activations\n cap: when quantization is active, activations are clipped to be no\n larger than cap\n act_bits: number of bits of precision to represent the activations\n **kwargs - see nn.ReLU\n \"\"\"\n def __init__(self, quantize=False, cap=31.0, act_bits=5, **kwargs):\n super(ReLU, self).__init__(**kwargs)\n self.register_buffer(\"quantize\", torch.tensor(quantize))\n self.register_buffer(\"cap\", torch.tensor(cap, dtype=torch.float32))\n self.register_buffer(\"max_act\", torch.tensor(spec.MAX_ACTIVATION_VALUE[act_bits]))\n\n def forward(self, x):\n if self.quantize:\n out = 0.5 * (torch.abs(x) - torch.abs(x - self.cap) + self.cap)\n factor = (self.max_act / self.cap)#.item() #uses less GPU RAM\n return Q.Round.apply(out * factor) / factor\n return F.relu(x, inplace=self.inplace)\n\nclass Upsample(nn.Module):\n \"\"\"A layer that implements Upsampling.\n\n Args:\n in_channels: number of input channels\n upsampling_mode: see models/deconv.py for a description of what the\n different modes mean\n \"\"\"\n def __init__(self, in_channels, upsampling_mode=\"REPEAT\"):\n 
super(Upsample, self).__init__()\n self.in_channels = in_channels\n if upsampling_mode == \"REPEAT\":\n self.register_buffer(\"up\", torch.ones(in_channels, 1, 2, 2))\n elif upsampling_mode == \"ZERO\":\n self.register_buffer(\"up\", torch.zeros(in_channels, 1, 2, 2))\n self.up[:,0,0,0]=1\n else:\n raise ValueError(\"Invalid upsampling_mode: \" + upsampling_mode)\n\n def forward(self, x):\n return F.conv_transpose2d(\n x,\n self.up,\n stride=2,\n groups=self.in_channels\n )\n\nclass Flatten(nn.Module):\n \"\"\"A layer that implements Flatten.\n Only necessary before pytorch 1.2\n\n Args:\n None\n \"\"\"\n def __init__(self):\n super(Flatten, self).__init__()\n\n def forward(self, x):\n return x.view(x.size(0), -1)\n\nclass Stride2Pooling(nn.Module):\n \"\"\"A layer that implements Stride2Pooling.\n Also called TopLeftPooling/SamplePooling (in each 2x2 block, picks\n top left)\n Only needed for ResBlock because residual addition occurs\n after conv, but before the stride 2\n\n Args:\n None\n \"\"\"\n def __init__(self):\n super(Stride2Pooling, self).__init__()\n\n def forward(self, x):\n return x[:,:,::2,::2]\n\n\"\"\"Main computation block\"\"\"\nclass ConvBlock(nn.Module):\n \"\"\"The main computation block that consists of a conv, (optional) BN, relu.\n\n Args:\n use_bn: if True, this block includes BN\n quant_params: GTI quantization parameters\n mask_bit: mask bitwidth for GTI quantized convolution\n other args: see nn.Conv2d\n\n Arg not included in interface:\n stride=1 by default\n stride=2 is a special case, and handled by the higher order computation blocks\n stride>2 not supported by chip\n \"\"\"\n def __init__(self, in_channels, out_channels, kernel_size=3,\n padding=1, dilation=1, groups=1, use_bn=True, quant_params=None, mask_bit=1):\n super(ConvBlock, self).__init__()\n self.register_buffer(\"use_bn\", torch.tensor(use_bn))\n self.register_buffer(\"fuse\", torch.tensor(quant_params.fuse))\n self.cal = False #only done once, no need to save var\n new_fuse = quant_params.fuse or not use_bn\n old_fuse = quant_params.fuse\n quant_params.fuse = new_fuse\n self.conv = Conv2d(\n quant_params,\n mask_bit,\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n padding=padding,\n dilation=dilation,\n groups=groups\n )\n quant_params.fuse = old_fuse\n if use_bn:\n self.bn = nn.BatchNorm2d(out_channels)\n if quant_params.ten_bit_act:\n act_bits = 10\n else:\n act_bits = 5\n self.relu = ReLU(\n quant_params.quant_act,\n 0,\n act_bits,\n inplace=True\n )\n\n def set_status(self, qw, qa, fuse, cal=None):\n self.relu.quantize[...] = qa\n self.conv.quantize[...] = qw\n self.fuse[...] = fuse\n self.conv.bias.requires_grad = fuse\n if cal is not None:\n self.cal = cal\n\n def forward(self, x):\n x = self.conv(x)\n if self.use_bn and not self.fuse:\n x = self.bn(x)\n x = self.relu(x)\n if self.cal:\n #finds max (over epoch) of 99th percentile of each batch\n y = x.cpu().detach().numpy()\n temp = np.percentile(y, 99)\n if temp > self.relu.cap.item():\n self.relu.cap[...] = temp\n return x\n\nclass ReLUWrapper(nn.Module):\n \"\"\"Wraps a GTI ReLU layer to behave like a computation block.\n This is only used for the RELU_AFTER_ADDITION mode of residual blocks.\n\n Args:\n see ReLU\n \"\"\"\n def __init__(self, quantize=False, cap=31.0, act_bits=5, **kwargs):\n super(ReLUWrapper, self).__init__()\n self.relu = ReLU(quantize=False, cap=31.0, act_bits=5, **kwargs)\n self.cal = False\n\n def set_status(self, qw, qa, fuse, cal=None):\n self.relu.quantize[...] 
= qa\n if cal is not None:\n self.cal = cal\n\n def forward(self, x):\n x = self.relu(x)\n if self.cal:\n #finds max (over epoch) of 99th percentile of each batch\n y = x.cpu().detach().numpy()\n temp = np.percentile(y, 99)\n if temp > self.relu.cap.item():\n self.relu.cap[...] = temp\n return x\n\n\"\"\"Higher order computation blocks: all are either nn.Sequential or subclass it\"\"\"\ndef basic_conv_block(\n in_channels,\n out_channels,\n kernel_size=3,\n padding=1,\n groups=1,\n downsample_mode=\"MAXPOOL\",\n block_size=1,\n use_bn=True,\n quant_params=None,\n mask_bit=1\n ):\n \"\"\"Basic convolution blocks comprised of multiple GTI conv layers.\n\n Args:\n in_channels: input channels, fixed for each conv layer except the first layer\n out_channels: output channels, fixed for all conv layers\n kernel_size: kernel size, fixed for all conv layers\n padding: has to be 1 for all conv layers due to chip compatibility\n downsample_mode: None = no downsampling; \"MAXPOOL\" = 2x2, stride 2\n Maxpool at the end; \"STRIDE2\" = stride 2 on last conv layer.\n On any given chip, \"MAXPOOL\" and \"STRIDE2\" cannot be mixed,\n but cannot be checked here. A similar constraint applies\n to upsampling_mode in deconv_block(s), if any.\n block_size: number of convolution layers\n use_bn: if True, every conv is followed by a BN (then relu)\n quant_params: GTI quantization parameters\n mask_bit: mask bitwidth for GTI quantized convolution\n groups: used for group convolutions - see nn.Conv2d\n\n Returns:\n GTI basic conv blocks wrapped in nn.sequential layers\n \"\"\"\n\n assert block_size>0, \"at least 1 block required\"\n layers = []\n for _ in range(block_size):\n layers.append(\n ConvBlock(\n in_channels,\n out_channels,\n kernel_size,\n padding=padding,\n groups=groups,\n use_bn=use_bn,\n quant_params=quant_params,\n mask_bit=mask_bit\n )\n )\n in_channels = out_channels\n if downsample_mode:\n if downsample_mode==\"MAXPOOL\":\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n elif downsample_mode==\"STRIDE2\":\n layers[-1].conv.stride = (2,2)\n else:\n raise NotImplementedError(\"Unrecognized downsample_mode: \" + downsample_mode)\n return nn.Sequential(*layers)\n\nclass ResBlock(nn.Sequential):\n \"\"\"Residual block with similar interface to nn.Seq\n This is necessary because nn.Seq cannot implement residual connections.\n\n Args:\n resnet_mode: see residual_block below\n *args: see nn.Sequential\n \"\"\"\n def __init__(self, *args, resnet_mode=\"ORIGINAL\"):\n super(ResBlock, self).__init__(*args)\n if resnet_mode==\"ORIGINAL\":\n if len(args)%2:\n self.pool = True\n else:\n self.pool = False\n elif resnet_mode==\"RELU_AFTER_ADDITION\":\n mod = len(args)%3\n if mod==1:\n self.pool = True\n elif mod==0:\n self.pool = False\n else:\n print(args)\n assert False, \"Improperly made ResBlock\"\n self.forward = self.forward2\n else:\n raise NotImplementedError(\"Resnet mode not supported: \" + resnet_mode)\n\n def forward(self, x):\n '''conv block, conv, optional BN, residual add, relu\n There may be an optional pool at the end of this block.'''\n for block_idx in range(0,len(self)-1,2):\n identity = x\n x = self[block_idx](x)\n block2 = self[block_idx+1]\n x = block2.conv(x)\n if block2.use_bn and not block2.fuse:\n x = block2.bn(x)\n x += identity\n x = block2.relu(x)\n if block2.cal:\n y = x.cpu().detach().numpy()\n temp = np.percentile(y, 99)\n if temp > block2.relu.cap.item():\n block2.relu.cap.data[...] 
= temp\n if self.pool:\n x = self[-1](x)\n return x\n\n def forward2(self, x):\n '''conv block, conv block, residual add, relu\n There may be an optional pool at the end of this block.'''\n for block_idx in range(0,len(self)-1,3):\n identity = x\n x = self[block_idx](x)\n x = self[block_idx+1](x)\n x += identity\n x = self[block_idx+2](x) #relu\n if self.pool:\n x = self[-1](x)\n return x\n\ndef residual_block(\n in_channels,\n out_channels,\n kernel_size=3,\n padding=1,\n downsample_mode=\"MAXPOOL\",\n block_size=1,\n use_bn=True,\n quant_params=None,\n mask_bit=1,\n resnet_mode=\"ORIGINAL\"\n ):\n \"\"\"Residual blocks comprised of multiple residual convolution pairs.\n Due to residual connection, in channels must = out channels.\n Vanilla resnet has a conv in the identity path, which allows\n changing channel #; we do not have that option\n\n Args:\n in_channels: input channels, fixed for each conv layer except the first layer\n out_channels: output channels, fixed for all conv layers\n kernel_size: kernel size, fixed for all conv layers\n padding: has to be 1 for all conv layers due to chip compatibility\n downsample_mode: None = no downsampling; \"MAXPOOL\" = 2x2, stride 2\n Maxpool at the end; \"STRIDE2\" = stride 2 on last conv layer.\n On any given chip, \"MAXPOOL\" and \"STRIDE2\" cannot be mixed,\n but cannot be checked here. A similar constraint applies\n to upsampling_mode in deconv_block(s), if any.\n block_size: number of convolution layers\n use_bn: if True, every conv is followed by a BN (then relu)\n quant_params: GTI quantization parameters\n mask_bit: mask bitwidth for GTI quantized convolution\n resnet_mode: \"ORIGINAL\" - residual blocks consist of: ConvBlock, conv,\n BN, add in identity, relu. See also arxiv:1603.05027 figure 4a.\n \"RELU_AFTER_ADDITION\" - residual blocks consist of: ConvBlock,\n ConvBlock, add in identity, (another) relu. 
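Worth spelling out from the quantized ReLU in this record: the expression 0.5 * (|x| - |x - cap| + cap) is exactly a clamp of x to [0, cap], written in a form that keeps a usable (sub)gradient at both boundaries. A quick standalone NumPy check of the identity:

import numpy as np

x = np.linspace(-3.0, 8.0, 23)
cap = 5.0
smooth = 0.5 * (np.abs(x) - np.abs(x - cap) + cap)
# identical to a hard clamp on [0, cap]
assert np.allclose(smooth, np.clip(x, 0.0, cap))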
See also\n arxiv:1603.05027 figure 4c, but with an extra relu at the end.\n Due to chip limitation, residual blocks always appear in pairs.\n\n Returns:\n GTI residual conv blocks (may contains multiple residual paris \\\n depending on block_size) wrapped in nn.sequential layers\n \"\"\"\n\n assert in_channels==out_channels, \"input channels must equal \\\n output channels for GTI resnet block!\"\n assert block_size>0, \"at least 1 block required\"\n assert resnet_mode in [\"ORIGINAL\", \"RELU_AFTER_ADDITION\"]\n if quant_params.ten_bit_act:\n act_bits = 10\n else:\n act_bits = 5\n layers = []\n for block_idx in range(block_size*2):\n layers.append(\n ConvBlock(\n in_channels,\n out_channels,\n kernel_size,\n padding=padding,\n use_bn=use_bn,\n quant_params=quant_params,\n mask_bit=mask_bit\n )\n )\n if resnet_mode!=\"ORIGINAL\" and block_idx%2:\n layers.append(\n ReLUWrapper(\n quant_params.quant_act,\n 0,\n act_bits,\n inplace=True\n )\n )\n if downsample_mode:\n if downsample_mode==\"MAXPOOL\":\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n elif downsample_mode==\"STRIDE2\":\n layers.append(Stride2Pooling())\n else:\n raise NotImplementedError(\"Unrecognized downsample_mode: \" + downsample_mode)\n return ResBlock(*layers, resnet_mode=resnet_mode)\n\ndef depthwise_sep_block(\n in_channels,\n out_channels,\n kernel_size=3,\n padding=1,\n downsample_mode=None,\n block_size=1,\n use_bn=True,\n quant_params=None,\n mask_bit=1\n ):\n \"\"\"Depthwise separable blocks comprised of multiple pairs of pointwise conv + depthwise conv.\n IMPORTANT: the order here is opposite to the commonly accepted order.\n This ordering reflects a chip limitation: mask_bit can only change\n between major layers, and cannot change within a major layer.\n Since a stride 2 conv ends a major layer, it would be awkward for this\n block to start with a stride 2 conv.\n To recover a sequence of \"regular\" depthwise separable blocks, pad the\n beginning and end with basic_conv_blocks.\n\n Args:\n in_channels: input channels, fixed for each conv layer except the first layer\n out_channels: output channels, fixed for all conv layers\n kernel_size: predefined; variable ignored\n padding: has to be 1 for all conv layers due to chip compatibility\n downsample_mode: None = no downsampling; \"MAXPOOL\" = 2x2, stride 2\n Maxpool at the end; \"STRIDE2\" = stride 2 on last conv layer.\n On any given chip, \"MAXPOOL\" and \"STRIDE2\" cannot be mixed,\n but cannot be checked here. 
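To make the pointwise-then-depthwise pairing described above concrete, here is a small torch sketch of one such pair next to a plain 3x3 convolution; the channel sizes are arbitrary illustrations, and the BN/ReLU that ConvBlock adds are omitted:

import torch.nn as nn

cin, cout = 32, 64
pair = nn.Sequential(
    nn.Conv2d(cin, cout, kernel_size=1, padding=0),                 # pointwise: mixes channels
    nn.Conv2d(cout, cout, kernel_size=3, padding=1, groups=cout),   # depthwise: per-channel 3x3
)
plain = nn.Conv2d(cin, cout, kernel_size=3, padding=1)

def n_params(m):
    return sum(p.numel() for p in m.parameters())

print(n_params(pair), n_params(plain))  # the pair is considerably smaller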
A similar constraint applies\n to upsampling_mode in deconv_block(s), if any.\n block_size: number of convolution layers\n use_bn: if True, every conv is followed by a BN (then relu)\n quant_params: GTI quantization parameters\n mask_bit: mask bitwidth for GTI quantized convolution\n\n Returns:\n GTI depthwise separable blocks (may contain multiple pointwise\n and depthwise pairs depending on block_size) wrapped in nn.Sequential\n \"\"\"\n assert block_size>0, \"at least 1 block required\"\n layers = []\n for _ in range(block_size):\n layers.append(\n ConvBlock(\n in_channels,\n out_channels,\n kernel_size=1,\n padding=0,\n groups=1,\n use_bn=use_bn,\n quant_params=quant_params,\n mask_bit=mask_bit\n )\n )\n in_channels = out_channels\n layers.append(\n ConvBlock(\n in_channels,\n out_channels,\n kernel_size=3,\n padding=padding,\n groups=in_channels,\n use_bn=use_bn,\n quant_params=quant_params,\n mask_bit=mask_bit\n )\n )\n if downsample_mode:\n if downsample_mode==\"MAXPOOL\":\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n elif downsample_mode==\"STRIDE2\":\n layers[-1].conv.stride = (2,2)\n else:\n raise NotImplementedError(\"Unrecognized downsample_mode: \" + downsample_mode)\n return nn.Sequential(*layers)\n\ndef deconv_block(\n in_channels,\n out_channels,\n kernel_size=3,\n padding=1,\n upsampling_mode=\"REPEAT\",\n block_size=1,\n use_bn=True,\n quant_params=None,\n mask_bit=1\n ):\n\n \"\"\"GTI device supported 'deconvolution', i.e. upsampling followed by GTI conv layers\n\n Args:\n in_channels: input channels, fixed for each conv layer except the first layer\n out_channels: output channels, fixed for all conv layers\n kernel_size: kernel size, fixed for all conv layers\n padding: has to be 1 for all conv layers due to chip compatibility\n upsampling_mode:\n - REPEAT: fill with repeats of current value, for example:\n 1 becomes [1, 1]\n [1, 1]\n - ZERO: fill with zeros, for example:\n 1 becomes [1, 0]\n [0, 0]\n On any given chip, \"REPEAT\" and \"ZERO\" cannot be mixed,\n but cannot be checked here. 
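The REPEAT mode of the Upsample layer defined earlier in this record is nearest-neighbour 2x upsampling in disguise: a grouped transposed convolution with an all-ones 2x2 kernel and stride 2 writes each pixel into its own non-overlapping 2x2 block. A standalone equivalence check:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 4, 4)
up = torch.ones(3, 1, 2, 2)  # one 2x2 kernel of ones per channel
a = F.conv_transpose2d(x, up, stride=2, groups=3)
b = x.repeat_interleave(2, dim=2).repeat_interleave(2, dim=3)  # plain nearest-neighbour repeat
assert torch.allclose(a, b)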
A similar constraint applies\n to downsample_mode in the other block(s), if any.\n block_size: number of convolution layers\n use_bn: if True, every conv is followed by a BN (then relu)\n quant_params: GTI quantization parameters\n mask_bit: mask bitwidth for GTI quantized convolution\n\n Returns:\n GTI deconv blocks (may contains multiple conv layers \\\n depending on block_size) wrapped in nn.sequential layers\n \"\"\"\n\n assert in_channels==out_channels, \"input channel must equal \\\n output channel for GTI deconv block!\"\n assert block_size>0, \"at least 1 block required\"\n\n layers = [Upsample(in_channels, upsampling_mode)]\n for _ in range(block_size):\n layers.append(\n ConvBlock(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n padding=padding,\n use_bn=use_bn,\n quant_params=quant_params,\n mask_bit=mask_bit\n )\n )\n return nn.Sequential(*layers)\n","sub_path":"ImageCompression_GTI-master/gti/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":21942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"166732920","text":"import unittest\nimport asyncio\n\nfrom deploy_isolation import DeploymentIsolater\nfrom rackhd.rackhd_api import RackHDAccessSession\nfrom sku import SkuManager\n\n\nclass TestRackAPIHandler(unittest.TestCase):\n\n def setUp(self):\n self.__deployment_isolator = DeploymentIsolater()\n self.__rackhd_config = self.__deployment_isolator.rackhd_config_instances()[0]\n if not self.__rackhd_config.is_rackhd_configured():\n raise unittest.SkipTest('rackhd server not setup')\n am_config_ctl = self.__deployment_isolator.asset_monitor_config()\n sku_manager = SkuManager(am_config_ctl)\n self.__api20_catalog_name_list = sku_manager.get_api20_catalog_name_list()\n\n def __test_async(self, method, *args, **kwargs):\n loop = asyncio.get_event_loop()\n res = loop.run_until_complete(method(*args, **kwargs))\n return res\n\n async def __get_nodes(self):\n async with RackHDAccessSession(self.__rackhd_config, self.__api20_catalog_name_list) as rackhd_session:\n nodes = await rackhd_session.init_auth_and_get_rackhd_node_identifiers()\n return nodes\n\n async def __get_catalogs_by_id(self, nodeid_index):\n async with RackHDAccessSession(self.__rackhd_config, self.__api20_catalog_name_list) as rackhd_session:\n nodes = await rackhd_session.init_auth_and_get_rackhd_node_identifiers()\n if nodeid_index is None:\n # get bogus node\n nodeid = '000'\n else:\n assert nodeid_index < len(nodes), 'node index {} outside of list-length {}'.format(nodeid_index, len(nodes))\n nodeid = nodes[nodeid_index]\n catalogs = await rackhd_session.get_rackhd_node_catalogs_by_id(nodeid)\n if catalogs is None:\n return None\n cat_map = {}\n for cat in catalogs:\n cat_map[cat['source']] = cat\n return cat_map\n\n def test_if_server_returns_system_node_identifier_list(self):\n nodes = self.__test_async(self.__get_nodes)\n self.assertIsNotNone(nodes)\n\n def test_if_server_returns_system_node_identifier_list_is_greater_equal_to_zero(self):\n nodes = self.__test_async(self.__get_nodes)\n self.assertGreaterEqual(len(nodes), 0)\n\n def test_if_server_returns_none_when_identifier_not_found(self):\n catalogs = self.__test_async(self.__get_catalogs_by_id, None)\n self.assertIsNone(catalogs)\n\n def __get_catalogs(self):\n identifiers = self.__rackhd_api.get_rackhd_node_identifiers()\n if len(identifiers) == 0:\n raise unittest.SkipTest('rackhd has zero node list')\n cats = 
self.__rackhd_api.get_rackhd_node_catalogs_by_id(identifiers[0])\n cat_map = {}\n for cat in cats:\n cat_map[cat['source']] = cat\n return cat_map\n\n def test_if_server_returns_node_data_with_valid_identifier_for_20_catalogs(self):\n cat_map = self.__test_async(self.__get_catalogs_by_id, 0)\n # we want to find at least one non-redfish...\n found_cnt = 0\n for cat_source in cat_map.keys():\n if 'redfish' not in cat_source:\n found_cnt += 1\n\n self.assertGreater(found_cnt, 0)\n\n def test_if_server_returns_node_data_with_valid_identifier_for_redfish_catalogs(self):\n cat_map = self.__test_async(self.__get_catalogs_by_id, 0)\n # we want to find at least one redfish...\n found_cnt = 0\n for cat_source in cat_map.keys():\n if 'redfish' in cat_source:\n found_cnt += 1\n\n self.assertGreater(found_cnt, 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/unittests/test_rackhd_api.py","file_name":"test_rackhd_api.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"516654712","text":"# -- coding = 'utf-8' -- \n# Author Kylin\n# Python Version 3.7.3\n# OS macOS\n\"\"\"\nNo.26 删除有序数组中的重复项\n需求:给你一个升序排列的数组nums,请你原地删除重复出现的元素,使每个元素只出现一次 ,返回删除后数组的新长度。\n注意:· 元素的相对顺序应该保持一致 。\n · 不要使用额外的空间,你必须在原地修改输入数组并在使用O(1)额外空间的条件下完成。\n\"\"\"\n\n\ndef removeDuplicates(nums):\n \"\"\"\n 双指针,一个指向不含重复数组的元素索引,一个指向当前元素。\n 元素已经有序,重复元素一定相邻,即将不重复的元素左移\n 时间复杂度:\n 空间复杂度:\n :type nums: List[int]\n :rtype: int\n \"\"\"\n n_before = len(nums)\n\n if n_before <= 1:\n return n_before\n\n # 慢指针指向数组头部,指向去除重复元素后数组的末尾位置\n # 快指针指向当前元素\n # fast从1开始,因为是从索引为1的元素开始删除\n slow, fast = 0, 1\n while fast < n_before:\n if nums[slow] != nums[fast]:\n # 如果slow位置和fast位置不相等,就将元素加入去除重复元素后的数组\n slow += 1\n nums[slow] = nums[fast]\n\n fast += 1\n\n # 因为slow是从0开始的,计算长度的时候再+1\n return slow + 1\n\n\nif __name__ == \"__main__\":\n nums = [1, 1, 2]\n length = removeDuplicates(nums)\n print(length)","sub_path":"LeetCode/src/search07/remove_duplicates.py","file_name":"remove_duplicates.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"82297205","text":"import sys\n\nfrom functools import partial\nfrom itertools import count, izip\nfrom pathlib import Path\n\nfrom PySide import QtGui\nfrom PySide.QtCore import Qt, QEvent, QSettings\nfrom PySide.QtGui import (QAction, QDesktopServices, QHBoxLayout, QMenu,\n QMessageBox, QVBoxLayout, QWidget)\n\n# This import is to register our icon resources with QT\nimport inselect.gui.icons # noqa\n\nfrom inselect.lib.document import InselectDocument\nfrom inselect.lib.document_export import DocumentExport\nfrom inselect.lib.ingest import ingest_image, IMAGE_PATTERNS, IMAGE_SUFFIXES_RE\nfrom inselect.lib.inselect_error import InselectError\nfrom inselect.lib.utils import debug_print, is_writable\n\nfrom .about import show_about_box\nfrom .colours import colour_scheme_choice\nfrom .info_widget import InfoWidget\nfrom .cookie_cutter_choice import cookie_cutter_choice\nfrom .cookie_cutter_widget import CookieCutterWidget\nfrom .format_validation_problems import format_validation_problems\nfrom .model import Model\nfrom .plugins.barcode import BarcodePlugin\nfrom .plugins.segment import SegmentPlugin\nfrom .plugins.subsegment import SubsegmentPlugin\nfrom .recent_documents import RecentDocuments\nfrom .roles import RotationRole\nfrom .user_template_choice import user_template_choice\nfrom .utils import contiguous, 
report_to_user, qimage_of_bgr\nfrom .views.boxes import BoxesView, GraphicsItemView\nfrom .views.metadata import MetadataView\nfrom .views.object import ObjectView\nfrom .views.selector import SelectorView\nfrom .views.summary import SummaryView\nfrom .worker_thread import WorkerThread\n\n\nclass MainWindow(QtGui.QMainWindow):\n \"\"\"The application's main window\n \"\"\"\n DOCUMENT_FILE_FILTER = u'Inselect documents (*{0});;Images ({1})'.format(\n InselectDocument.EXTENSION,\n u' '.join(IMAGE_PATTERNS)\n )\n\n IMAGE_FILE_FILTER = u'Images ({0})'.format(u' '.join(IMAGE_PATTERNS))\n\n def __init__(self, app, filename=None):\n super(MainWindow, self).__init__()\n self.app = app\n\n # Boxes view\n self.view_graphics_item = GraphicsItemView()\n # self.boxes_view is a QGraphicsView, not a QAbstractItemView\n self.boxes_view = BoxesView(self.view_graphics_item.scene)\n\n # Object, metadata and summary views\n self.view_metadata = MetadataView()\n self.view_object = ObjectView()\n self.view_summary = SummaryView()\n self.view_selector = SelectorView()\n\n # Views in tabs\n self.tabs = QtGui.QTabWidget()\n self.tabs.addTab(self.boxes_view, 'Boxes')\n self.tabs.addTab(self.view_object, 'Objects')\n self.tabs.setCurrentIndex(0)\n\n # Information about the loaded document\n self.info_widget = InfoWidget()\n\n # Cookie cutter widget\n self.cookie_cutter_widget = CookieCutterWidget()\n self.cookie_cutter_widget.save_to_new_action.triggered.connect(\n self.save_to_cookie_cutter\n )\n self.cookie_cutter_widget.apply_current_action.triggered.connect(\n self.apply_cookie_cutter\n )\n cookie_cutter_choice().cookie_cutter_changed.connect(\n self.new_cookie_cutter\n )\n\n # Metadata view above info\n sidebar_layout = QVBoxLayout()\n sidebar_layout.addWidget(self.view_metadata.widget)\n sidebar_layout.addWidget(self.info_widget)\n sidebar = QWidget()\n sidebar.setLayout(sidebar_layout)\n\n # Summary view below tabs\n summary_and_tabs_layout = QVBoxLayout()\n # Remove margins and padding\n summary_and_tabs_layout.setContentsMargins(0, 0, 0, 0)\n summary_and_tabs_layout.setSpacing(0)\n summary_and_tabs_layout.addWidget(self.tabs)\n summary_and_tabs_layout.addWidget(self.view_summary.widget)\n summary_and_tabs = QWidget()\n summary_and_tabs.setLayout(summary_and_tabs_layout)\n\n # Tabs alongside metadata fields\n self.splitter = QtGui.QSplitter()\n self.splitter.addWidget(summary_and_tabs)\n self.splitter.addWidget(sidebar)\n self.splitter.setSizes([600, 300])\n\n # Main window layout\n self.setCentralWidget(self.splitter)\n\n # Document\n self.document = None\n self.document_path = None\n\n # Model\n self.model = Model()\n self.model.modified_changed.connect(self.modified_changed)\n\n # Views\n self.view_graphics_item.setModel(self.model)\n self.view_metadata.setModel(self.model)\n self.view_object.setModel(self.model)\n self.view_summary.setModel(self.model)\n self.view_selector.setModel(self.model)\n\n # A consistent selection across all views\n sm = self.view_object.selectionModel()\n self.view_graphics_item.setSelectionModel(sm)\n self.view_metadata.setSelectionModel(sm)\n self.view_summary.setSelectionModel(sm)\n self.view_selector.setSelectionModel(sm)\n\n # Plugins\n self.plugins = (SegmentPlugin, SubsegmentPlugin, BarcodePlugin)\n # QActions. Populated in self.create_menu_actions()\n self.plugin_actions = len(self.plugins) * [None]\n # QActions. 
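The view wiring in MainWindow.__init__ above leans on a stock Qt pattern: any number of views can share one model and one QItemSelectionModel, so a selection made in one view appears in all of them. A minimal PySide sketch of that pattern, using a throwaway QStandardItemModel in place of the application's Model:

from PySide import QtGui

app = QtGui.QApplication([])  # required before any widgets are created
model = QtGui.QStandardItemModel()
for text in ('box 1', 'box 2', 'box 3'):
    model.appendRow(QtGui.QStandardItem(text))

first, second = QtGui.QListView(), QtGui.QListView()
first.setModel(model)
second.setModel(model)
second.setSelectionModel(first.selectionModel())  # one selection shared by both views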
Populated in self.create_menu_actions()\n self.plugin_config_ui_actions = len(self.plugins) * [None]\n self.plugin_image = None\n self.plugin_image_visible = False\n\n # Colour scheme QActions. Populated in self.create_actions()\n self.colour_scheme_actions = []\n\n # Long-running operations are run in their own thread.\n self.running_operation = None\n\n self.create_menu_actions()\n self.create_non_menu_actions()\n self.create_toolbar()\n self.create_menus()\n\n # Conect signals\n self.tabs.currentChanged.connect(self.current_tab_changed)\n sm.selectionChanged.connect(self.selection_changed)\n colour_scheme_choice().colour_scheme_changed.connect(\n self.colour_scheme_changed\n )\n\n # Filter events\n self.tabs.installEventFilter(self)\n self.boxes_view.installEventFilter(self)\n self.view_metadata.installEventFilter(self)\n self.view_object.installEventFilter(self)\n self.view_summary.installEventFilter(self)\n self.view_selector.installEventFilter(self)\n\n self.empty_document()\n\n self.setAcceptDrops(True)\n\n if filename:\n self.open_file(filename)\n\n def modified_changed(self):\n \"Updated UI's modified state\"\n debug_print('MainWindow.modified_changed')\n self.setWindowModified(self.model.is_modified)\n\n def eventFilter(self, obj, event):\n \"Event filter that accepts drag-drop events\"\n if event.type() in (QEvent.DragEnter, QEvent.Drop):\n return True\n else:\n return super(MainWindow, self).eventFilter(obj, event)\n\n @report_to_user\n def open_file(self, path=None):\n \"\"\"Opens path, which can be None, the path to an inselect document or\n the path to an image file. If None, the user is prompted to select a\n file.\n\n * If a .inselect file, the file is opened\n * If an image file for which a .inselect document already exists, the\n .inselect file is opened\n * If a _thumbnail.jpg file corresponding to an existing .inselect file,\n the .inselect file is opened\n * If an image file, a new .inselect file is created and opened\n \"\"\"\n debug_print(u'MainWindow.open_file [{0}]'.format(path))\n\n if not path:\n folder = QSettings().value(\n 'working_directory',\n QDesktopServices.storageLocation(QDesktopServices.DocumentsLocation)\n )\n\n path, selectedFilter = QtGui.QFileDialog.getOpenFileName(\n self, \"Open\", folder, self.DOCUMENT_FILE_FILTER)\n\n # path will be None if user cancelled getOpenFileName\n if path:\n path = Path(path)\n\n # What type of file did the user select?\n document_path = image_path = None\n if InselectDocument.EXTENSION == path.suffix:\n # An inselect document\n document_path = path\n elif IMAGE_SUFFIXES_RE.match(path.name):\n # Compute the path to the inselect document (which may or\n # may not already exist) of the image file\n doc_of_image = path.name.replace(InselectDocument.THUMBNAIL_SUFFIX, u'')\n doc_of_image = path.parent / doc_of_image\n doc_of_image = doc_of_image.with_suffix(InselectDocument.EXTENSION)\n if doc_of_image.is_file():\n # An image file corresponding to an existing .inselect file\n document_path = doc_of_image\n else:\n # An image file\n image_path = path\n\n if not self.close_document(document_path):\n # User does not want to close the existing document\n pass\n elif document_path:\n # Open the .inselect document\n debug_print('Opening inselect document [{0}]'.format(document_path))\n self.open_document(path=document_path)\n elif image_path:\n msg = u'Creating new inselect document for image [{0}]'\n debug_print(msg.format(image_path))\n self.new_document(image_path)\n else:\n raise InselectError('Unknown file type 
[{0}]'.format(path))\n\n def new_document(self, path, default_metadata_items=None):\n \"\"\"Creates and opens a new inselect document for the scanned image\n given in path\n \"\"\"\n debug_print('MainWindow.new_document [{0}]'.format(path))\n\n path = Path(path)\n if not path.is_file():\n raise InselectError(u'Image file [{0}] does not exist'.format(path))\n else:\n # Callable for worker thread\n thumbnail_width = user_template_choice().current.thumbnail_width_pixels\n\n class NewDoc(object):\n def __init__(self, image, default_metadata_items):\n self.image = image\n self.default_metadata_items = default_metadata_items\n self.document = None\n\n def __call__(self, progress):\n progress('Creating thumbnail of scanned image')\n doc = ingest_image(self.image, self.image.parent,\n thumbnail_width,\n self.default_metadata_items,\n cookie_cutter_choice().current)\n self.document = doc\n\n self.run_in_worker(NewDoc(path, default_metadata_items),\n 'New document',\n self.new_document_finished)\n\n def new_document_finished(self, operation):\n \"\"\"Called when new_document worker has finished\n \"\"\"\n debug_print('MainWindow.new_document_finished')\n\n document = operation.document\n document_path = document.document_path\n QSettings().setValue('working_directory', str(document_path.parent))\n\n self.open_document(document=document)\n\n msg = u'New Inselect document [{0}] created in [{1}]'\n msg = msg.format(document_path.stem, document_path.parent)\n QMessageBox.information(self, \"Document created\", msg)\n\n def _sync_recent_documents_actions(self):\n \"Synchronises the 'recent documents' actions\"\n debug_print('MainWindow._sync_recent_documents_actions')\n recent = RecentDocuments().read_paths()\n if not recent:\n # No recent documents - a single disabled action with placeholder\n # text\n self.recent_doc_actions[0].setEnabled(False)\n self.recent_doc_actions[0].setText('No recent documents')\n self.recent_doc_actions[0].setVisible(True)\n hide_actions_after = 1\n elif len(recent) > len(self.recent_doc_actions):\n msg = 'Unexpected number of recent documents [{0}]'\n raise ValueError(msg.format(len(recent)))\n else:\n # Show as many actions as there are recent documents\n for index, path, action in izip(count(), recent, self.recent_doc_actions):\n action.setEnabled(True)\n action.setText(path.stem)\n action.setToolTip(str(path))\n action.setVisible(True)\n hide_actions_after = 1 + index\n\n # Hide all actions after and including 'hide_actions_after'\n for action in self.recent_doc_actions[hide_actions_after:]:\n action.setVisible(False)\n action.setText('')\n\n @report_to_user\n def open_recent(self, index):\n debug_print('MainWindow._open_recent [{0}]'.format(index))\n recent = RecentDocuments().read_paths()\n self.open_file(recent[index])\n\n def open_document(self, path=None, document=None):\n \"\"\"Either loads the inselect document from path or uses the existing\n InselectDocument given in document.\n \"\"\"\n if path and document:\n raise ValueError('Both path and document given')\n\n if path:\n path = Path(path)\n document = InselectDocument.load(path)\n else:\n path = document.document_path\n\n debug_print('MainWindow.open_document [{0}]'.format(path))\n QSettings().setValue(\"working_directory\", str(path.parent))\n\n self.document = document\n self.document_path = path\n self.model.from_document(self.document)\n\n self.setWindowTitle('')\n self.setWindowFilePath(str(self.document_path))\n self.info_widget.set_document(self.document)\n\n RecentDocuments().add_path(path)\n 
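The suffix juggling in open_file above, which maps a scanned image or its thumbnail back to the corresponding .inselect document, is plain pathlib arithmetic. A sketch of the same derivation; the two constants are stand-ins for InselectDocument.THUMBNAIL_SUFFIX and InselectDocument.EXTENSION, and the file name is hypothetical:

from pathlib import Path

THUMBNAIL_SUFFIX = '_thumbnail'   # assumed stand-in for InselectDocument.THUMBNAIL_SUFFIX
EXTENSION = '.inselect'           # assumed stand-in for InselectDocument.EXTENSION

def document_of_image(path):
    # '/scans/drawer_thumbnail.jpg' -> '/scans/drawer.inselect'
    name = path.name.replace(THUMBNAIL_SUFFIX, '')
    return (path.parent / name).with_suffix(EXTENSION)

print(document_of_image(Path('/scans/drawer_thumbnail.jpg')))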
self._sync_recent_documents_actions()\n\n self.sync_ui()\n\n if not is_writable(path):\n msg = (u'The file [{0}] is read-only.\\n\\n'\n u'You will not be able to save any changes that you make.')\n msg = msg.format(path.name)\n QMessageBox.warning(self, \"Document is read-only\", msg)\n\n @report_to_user\n def save_document(self):\n \"\"\"Saves the document\n \"\"\"\n debug_print('MainWindow.save_document')\n\n self.model.to_document(self.document)\n self.document.save()\n self.model.set_modified(False)\n self.info_widget.set_document(self.document)\n\n def _prompt_validation_problems(self, problems, title, question):\n \"\"\"Prompts the user with the question and the list of validation\n problems. Returns the result of QMessageBox.exec_().\n \"\"\"\n box = QMessageBox(QMessageBox.Question, title, '',\n QMessageBox.No | QMessageBox.Yes)\n box.setDefaultButton(QtGui.QMessageBox.No)\n\n SHOW_AT_MOST = 5\n report_problems = problems[:SHOW_AT_MOST]\n if SHOW_AT_MOST <= len(problems):\n msg = ('The document contains {n_problems} validation problems. '\n 'The first {show_at_most} are shown below. Click \"Show '\n 'details\" to see all of them.\\n'\n '\\n'\n '{problems}\\n'\n '\\n'\n '{question}')\n box.setDetailedText('\\n'.join(problems))\n else:\n msg = ('The document contains {n_problems} validation problems:\\n'\n '\\n'\n '{problems}\\n'\n '\\n'\n '{question}')\n\n box.setText(msg.format(n_problems=len(problems),\n show_at_most=SHOW_AT_MOST,\n problems='\\n'.join(report_problems),\n question=question))\n\n return box.exec_()\n\n @report_to_user\n def save_crops(self, user_template=None):\n \"\"\"Saves cropped object images\n \"\"\"\n debug_print('MainWindow.save_crops')\n\n if user_template:\n export = DocumentExport(user_template)\n else:\n export = DocumentExport(user_template_choice().current)\n\n self.model.to_document(self.document)\n\n crops_dir = export.crops_dir(self.document)\n\n res = QMessageBox.Yes\n if crops_dir.is_dir():\n msg = 'Overwrite the existing object images?'\n res = QMessageBox.question(self, 'Save object images?',\n msg, QMessageBox.No, QMessageBox.Yes)\n\n validation = export.validation_problems(self.document)\n if QMessageBox.Yes == res and validation and validation.any_problems:\n res = self._prompt_validation_problems(\n list(format_validation_problems(validation)),\n 'Save object images?',\n 'Would you like to save the object images?')\n\n if QMessageBox.Yes == res:\n def save_crops(progress):\n progress('Loading full-resolution scanned image')\n self.document.scanned.array\n\n progress('Saving crops')\n export.save_crops(self.document, progress)\n\n def completed(operation):\n QMessageBox.information(self, \"Crops saved\", msg)\n\n msg = \"{0} crops saved in {1}\"\n msg = msg.format(self.document.n_items, crops_dir)\n self.run_in_worker(save_crops, 'Save crops', completed)\n\n @report_to_user\n def export_csv(self, user_template=None):\n debug_print('MainWindow.export_csv')\n\n if user_template:\n export = DocumentExport(user_template)\n else:\n export = DocumentExport(user_template_choice().current)\n\n self.model.to_document(self.document)\n path = export.csv_path(self.document)\n\n res = QMessageBox.Yes\n existing_csv = path.is_file()\n\n if existing_csv:\n msg = 'Overwrite the existing CSV file?'\n res = QMessageBox.question(self, 'Export CSV file?',\n msg, QMessageBox.No, QMessageBox.Yes)\n\n validation = export.validation_problems(self.document)\n if QMessageBox.Yes == res and validation and validation.any_problems:\n res = 
self._prompt_validation_problems(\n list(format_validation_problems(validation)),\n 'Export CSV file?',\n 'Would you like to export a CSV file?')\n\n if QMessageBox.Yes == res:\n export.export_csv(self.document)\n msg = \"Data for {0} boxes written to {1}\"\n msg = msg.format(self.document.n_items, path)\n QMessageBox.information(self, \"CSV saved\", msg)\n\n @report_to_user\n def save_screengrab(self):\n \"\"\"Prompts the user for the image file path to which to a screenshot\n will be saved.\n \"\"\"\n debug_print('MainWindow.save_screengrab')\n\n # Do not use OpenCV to write the image because the conversion from Qt's\n # QPixmap to a numpy array is non-trivial\n # Investigate https://pypi.python.org/pypi/qimage2ndarray/0.2\n\n # Work out the supported image file extensions\n extensions = QtGui.QImageWriter.supportedImageFormats()\n extensions = sorted([str(e).lower() for e in extensions])\n extensions = ['*.{0}'.format(e) for e in extensions]\n\n # Only some of these make sense. For example, do not offer the user\n # the change to save an eps, which is a format supported by QImageWriter\n extensions = sorted(set(extensions).intersection(IMAGE_PATTERNS))\n\n filter = 'Images ({0})'.format(' '.join(extensions))\n\n if self.document_path:\n # Default name is the name of this document with '_screengrab' appended\n default_fname = u'{0}_screengrab'.format(self.document_path.stem)\n else:\n default_fname = u'inselect_screengrab'\n\n # Default suffix is jpg, if available\n for e in ('.jpg', '.jpeg', '.png'):\n if '*{0}'.format(e) in extensions:\n default_extension = e\n break\n else:\n # Use the first available extension\n default_extension = extensions[0][1:]\n\n default_fname = Path(default_fname).with_suffix(default_extension)\n\n # Default folder is the user's documents folder\n default_dir = QDesktopServices.storageLocation(\n QDesktopServices.DocumentsLocation\n )\n\n debug_print(u'Default screengrab dir [{0}]'.format(default_dir))\n debug_print(u'Default screengrab fname [{0}]'.format(default_fname))\n path, selected_filter = QtGui.QFileDialog.getSaveFileName(\n self, \"Save image file of boxes view\",\n unicode(Path(default_dir) / default_fname),\n filter=filter\n )\n\n if path:\n pm = QtGui.QPixmap.grabWidget(self)\n\n # Write using QImageWriter, which makes richer error information\n # avaible than QPixmap.save()\n writer = QtGui.QImageWriter(path)\n if not writer.write(pm.toImage()):\n msg = 'An error occurred writing to [{0}]: [{1}]'\n raise InselectError(msg.format(path, writer.errorString()))\n else:\n debug_print('BoxesView.save_screengrab [{0}]'.format(path))\n\n @report_to_user\n def close_document(self, document_to_open=None):\n \"\"\"Closes the document and returns True if not modified or if modified\n and user does not cancel.\n\n If document_to_open is given and is the same as self.document_path then\n one of two things will happen. If the model is not modified, the user\n is informed and False is returned. If the model is modified, the user is\n asked if they would like to discard their changes and revert to the\n version on the filesystem. 
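close_document, whose docstring begins above, resolves these choices with the standard three-button QMessageBox idiom. A standalone PySide sketch, with the parent widget omitted and the wording borrowed from the method:

from PySide.QtGui import QApplication, QMessageBox

app = QApplication([])
res = QMessageBox.question(
    None, 'Save document?', 'Save the document before closing?',
    QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel,
    QMessageBox.Yes)  # default button
if QMessageBox.Yes == res:
    print('save, then close')
elif QMessageBox.No == res:
    print('close without saving')
else:
    print('cancelled - keep the document open')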
If the user selects No, False is returned.\n If the user selects Yes, the document is closed and True is returned.\n\n In all cases, if the user selects cancels then the document is not\n closes and False is returned.\n \"\"\"\n debug_print('MainWindow.close_document', document_to_open)\n # Must make sure that files exist before calling resolve\n if (self.document_path and self.document_path.is_file() and\n document_to_open and document_to_open.is_file() and\n self.document_path.resolve() == document_to_open.resolve()):\n if self.model.is_modified:\n # Ask the user if they work like to revert\n msg = (u'The document [{0}] is already open and has been '\n u'changed. Would you like to discard your changes and '\n u'revert to the previous version?')\n msg = msg.format(self.document_path.stem)\n res = QMessageBox.question(self, u'Discard changes?', msg,\n (QMessageBox.Yes | QMessageBox.No),\n QMessageBox.No)\n close = QMessageBox.Yes == res\n else:\n # Let the user know that the document is already open and\n # take no action\n msg = u'The document [{0}] is already open'\n msg = msg.format(self.document_path.stem)\n QMessageBox.information(self, 'Document already open', msg,\n QMessageBox.Ok)\n close = False\n elif self.model.is_modified:\n # Ask the user if they work like to save before closing\n res = QMessageBox.question(\n self, 'Save document?',\n 'Save the document before closing?',\n (QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel),\n QMessageBox.Yes\n )\n\n if QMessageBox.Yes == res:\n self.save_document()\n\n # Answering Yes or No means the document will be closed\n close = QMessageBox.Cancel != res\n else:\n # The document is not modified so it is OK to close it\n close = True\n\n if close:\n self.empty_document()\n\n return close\n\n @report_to_user\n def empty_document(self):\n \"\"\"Creates an empty document\n \"\"\"\n debug_print('MainWindow.empty_document')\n # Clear selection before closing for performance reasons\n self.select_none()\n self.document = None\n self.document_path = None\n self.plugin_image = None\n self.plugin_image_visible = False\n self.model.clear()\n\n self.setWindowTitle('Inselect')\n self.setWindowFilePath(None)\n self.info_widget.set_document(None)\n\n self.sync_ui()\n\n def closeEvent(self, event):\n \"\"\"QWidget virtual\n \"\"\"\n debug_print('MainWindow.closeEvent')\n if self.close_document():\n # User wants to close\n self.write_geometry_settings()\n event.accept()\n else:\n # User does not want to close\n event.ignore()\n\n @report_to_user\n def zoom_in(self):\n self.boxes_view.zoom_in()\n\n @report_to_user\n def zoom_out(self):\n self.boxes_view.zoom_out()\n\n @report_to_user\n def toggle_zoom(self):\n self.boxes_view.toggle_zoom()\n\n @report_to_user\n def zoom_home(self):\n self.boxes_view.zoom_home()\n\n @report_to_user\n def show_grid(self):\n self.view_object.show_grid()\n\n @report_to_user\n def show_expanded(self):\n self.view_object.show_expanded()\n\n @report_to_user\n def about(self):\n show_about_box(self)\n\n def run_in_worker(self, operation, name, complete_fn=None):\n \"\"\"Runs the callable operation in a worker thread. 
The callable\n complete_fn is called when the operation has finished.\n \"\"\"\n debug_print(\"MainWindow.run_in_worker\")\n\n if self.running_operation:\n debug_print('Operation already running')\n else:\n worker = WorkerThread(operation,\n name,\n self)\n worker.completed.connect(self.worker_finished)\n\n # TODO Make this a namedtuple\n self.running_operation = (operation, name, complete_fn, worker)\n worker.start()\n\n @report_to_user\n def worker_finished(self, user_cancelled, error_message):\n debug_print(\"MainWindow.worker_finished\", user_cancelled,\n error_message)\n\n operation, name, complete_fn, worker = self.running_operation\n self.running_operation = None\n\n if user_cancelled:\n QMessageBox.information(self, 'Cancelled',\n \"'{0}' cancelled\".format(name))\n elif error_message:\n QMessageBox.information(\n self,\n \"An error occurred running '{0}'\".format(name),\n error_message + '\\n\\nExisting data has not been altered'\n )\n else:\n if complete_fn:\n complete_fn(operation)\n self.sync_ui()\n\n @report_to_user\n def run_plugin(self, plugin_number):\n \"\"\"Passes each cropped object image through plugin\n \"\"\"\n debug_print(\"MainWindow.run_plugin\")\n\n if plugin_number < 0 or plugin_number >= len(self.plugins):\n raise ValueError('Unexpected plugin [{0}]'.format(plugin_number))\n else:\n plugin = self.plugins[plugin_number]\n\n self.model.to_document(self.document)\n\n # Create the plugin\n operation = plugin(self.document, self)\n if operation.can_be_run():\n self.run_in_worker(operation,\n plugin.NAME,\n self.plugin_finished)\n else:\n pass\n\n @report_to_user\n def show_plugin_config(self, plugin_number):\n debug_print(\"MainWindow.show_plugin_config\")\n\n if (plugin_number < 0 or plugin_number >= len(self.plugins) or\n self.plugin_config_ui_actions[plugin_number] is None):\n raise ValueError('Unexpected plugin [{0}]'.format(plugin_number))\n else:\n self.plugins[plugin_number].config(self)\n\n def plugin_finished(self, operation):\n \"\"\"Called when a plugin has finished running in a worker thread\n \"\"\"\n debug_print(\"MainWindow.plugin_finished\")\n\n if hasattr(operation, 'items'):\n self.model.set_new_boxes(operation.items)\n\n if hasattr(operation, 'display'):\n # An image that can be displayed instead of the main image\n display = operation.display\n self.plugin_image = QtGui.QPixmap.fromImage(qimage_of_bgr(display))\n self.update_boxes_display_pixmap()\n\n @report_to_user\n def select_all(self):\n \"\"\"Selects all boxes in the model\n \"\"\"\n sm = self.view_object.selectionModel()\n m = self.model\n sm.select(QtGui.QItemSelection(m.index(0, 0), m.index(m.rowCount()-1, 0)),\n QtGui.QItemSelectionModel.Select)\n\n @report_to_user\n def select_none(self):\n sm = self.view_object.selectionModel()\n sm.select(QtGui.QItemSelection(), QtGui.QItemSelectionModel.Clear)\n\n @report_to_user\n def delete_selected(self):\n \"\"\"Deletes the selected boxes\n \"\"\"\n debug_print('MainWindow.delete_selected')\n\n # Delete contiguous blocks of rows\n selected = self.view_object.selectionModel().selectedIndexes()\n selected = sorted([i.row() for i in selected])\n\n # Remove blocks in reverse order so that row indices are not invalidated\n # TODO LH We shouldn't need to remove blocks in reverse order - stems\n # from crummy GraphicsItemView\n for first, n_rows in reversed(list(contiguous(selected))):\n self.model.removeRows(first, n_rows)\n\n # Prevent object view from scrolling to the top of the view. 
The natural\n # place to do this is within ObjectView but I was unable to get that\n # solution to work.\n self.view_object.scrollTo(self.view_object.currentIndex())\n\n @report_to_user\n def select_next_prev(self, next):\n \"\"\"Selects the next box in the model if next is True, the previous\n box in the model if next is False.\n \"\"\"\n sm = self.view_object.selectionModel()\n current = sm.currentIndex()\n current = current.row() if current else -1\n\n select = current + (1 if next else -1)\n if select == self.model.rowCount():\n select = 0\n elif -1 == select:\n select = self.model.rowCount()-1\n\n debug_print('Will move selection from [{0}] to [{1}]'.format(current, select))\n select = self.model.index(select, 0)\n sm.select(QtGui.QItemSelection(select, select),\n QtGui.QItemSelectionModel.ClearAndSelect)\n sm.setCurrentIndex(select, QtGui.QItemSelectionModel.Current)\n\n @report_to_user\n def select_by_size_step(self, larger=False):\n \"\"\"Step the 'select by size' slider\n \"\"\"\n self.view_selector.single_step(larger)\n\n @report_to_user\n def rotate90(self, clockwise):\n \"\"\"Rotates the selected boxes 90 degrees either clockwise or counter-clockwise.\n \"\"\"\n debug_print('MainWindow.rotate')\n value = 90 if clockwise else -90\n selected = self.view_object.selectionModel().selectedIndexes()\n for index in selected:\n current = index.data(RotationRole)\n self.model.setData(index, current + value, RotationRole)\n\n def update_boxes_display_pixmap(self):\n \"\"\"Sets the pixmap in the boxes view\n \"\"\"\n pixmap = self.plugin_image if self.plugin_image_visible else None\n self.view_graphics_item.show_alternative_pixmap(pixmap)\n\n @report_to_user\n def toggle_plugin_image(self):\n \"\"\"Action method to switch between display of the last plugin's\n information image (if any) and the actual image.\n \"\"\"\n self.plugin_image_visible = not self.plugin_image_visible\n self.update_boxes_display_pixmap()\n\n def create_menu_actions(self):\n \"\"\"Creates actions that are associated with menu items\n \"\"\"\n # File menu\n self.open_action = QAction(\n \"&Open...\", self,\n shortcut=QtGui.QKeySequence.Open, triggered=self.open_file,\n icon=self.style().standardIcon(QtGui.QStyle.SP_DialogOpenButton)\n )\n self.copy_to_new_document_action = QAction(\n \"Copy to new document...\", self,\n triggered=self.copy_to_new_document\n )\n self.save_action = QAction(\n \"&Save\", self,\n shortcut=QtGui.QKeySequence.Save, triggered=self.save_document,\n icon=self.style().standardIcon(QtGui.QStyle.SP_DialogSaveButton)\n )\n self.save_crops_action = QAction(\n \"&Save crops\", self,\n triggered=self.save_crops\n )\n self.export_csv_action = QAction(\n \"&Export CSV\", self,\n triggered=self.export_csv\n )\n self.save_screengrab_action = QAction(\n \"Save screen grab\", self,\n triggered=self.save_screengrab\n )\n self.close_action = QAction(\n \"&Close\", self,\n shortcut=QtGui.QKeySequence.Close, triggered=self.close_document\n )\n self.exit_action = QAction(\n \"E&xit\", self,\n shortcut=QtGui.QKeySequence.Quit, triggered=self.close\n )\n\n if 'win32' == sys.platform:\n # Support ctrl+w and ctrl+q on Windows\n self.close_action.setShortcuts(['ctrl+w',\n self.close_action.shortcut()])\n self.exit_action.setShortcuts(['ctrl+q',\n self.exit_action.shortcut()])\n\n self.recent_doc_actions = [None] * RecentDocuments.MAX_RECENT_DOCS\n for index in xrange(RecentDocuments.MAX_RECENT_DOCS):\n self.recent_doc_actions[index] = QAction(\n 'Recent document', self,\n triggered=partial(self.open_recent, index)\n )\n 
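# functools.partial is used here rather than a lambda so that each action\n # captures its own value of index; a lambda in this loop would late-bind\n # index and every recent-document entry would open the last slot.\n 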
self._sync_recent_documents_actions()\n\n # Edit menu\n self.select_all_action = QAction(\n \"Select &All\", self,\n shortcut=QtGui.QKeySequence.SelectAll, triggered=self.select_all\n )\n # QT does not provide a 'select none' key sequence\n self.select_none_action = QAction(\n \"Select &None\", self,\n shortcut=\"ctrl+D\", triggered=self.select_none\n )\n self.next_box_action = QAction(\n \"Next box\", self, shortcut=\"ctrl+N\",\n triggered=partial(self.select_next_prev, next=True)\n )\n self.previous_box_action = QAction(\n \"Previous box\", self,\n shortcut=\"ctrl+P\",\n triggered=partial(self.select_next_prev, next=False)\n )\n self.select_by_size_larger_action = QAction(\n \"Select increasing size\", self, shortcut=\"ctrl+>\",\n triggered=partial(self.select_by_size_step, larger=True)\n )\n self.select_by_size_smaller_action = QAction(\n \"Select decreasing size\", self, shortcut=\"ctrl+<\",\n triggered=partial(self.select_by_size_step, larger=False)\n )\n\n self.delete_action = QAction(\n \"&Delete selected\", self,\n shortcut=QtGui.QKeySequence.Delete,\n triggered=self.delete_selected\n )\n # CMD + backspace is the Mac OS X shortcut for delete. Some Mac\n # keyboards have a Delete key, so this standard shortcut is also\n # included.\n if 'darwin' == sys.platform:\n self.delete_action.setShortcuts(['ctrl+backspace',\n self.delete_action.shortcut()])\n\n self.rotate_clockwise_action = QAction(\n \"Rotate clockwise\", self, shortcut=\"ctrl+R\",\n triggered=partial(self.rotate90, clockwise=True)\n )\n self.rotate_counter_clockwise_action = QAction(\n \"Rotate counter-clockwise\", self, shortcut=\"ctrl+L\",\n triggered=partial(self.rotate90, clockwise=False)\n )\n\n # Plugins\n # Plugin shortcuts start at F5\n shortcut_offset = 5\n for index, plugin in enumerate(self.plugins):\n action = QAction(plugin.NAME, self,\n triggered=partial(self.run_plugin, index))\n shortcut_fkey = index + shortcut_offset\n if shortcut_fkey < 13:\n # Keyboards typically have 12 function keys\n action.setShortcut('f{0}'.format(shortcut_fkey))\n if hasattr(plugin, 'icon'):\n action.setIcon(plugin.icon())\n self.plugin_actions[index] = action\n if hasattr(plugin, 'config'):\n ui_action = QAction(\n u\"Configure '{0}'...\".format(plugin.NAME), self,\n triggered=partial(self.show_plugin_config, index)\n )\n # Force menu items to appear on Mac\n ui_action.setMenuRole(QAction.NoRole)\n self.plugin_config_ui_actions[index] = ui_action\n\n # View menu\n # It is tempting to set the trigger to\n # partial(self.tabs.setCurrentIndex, 0) but this causes a segfault when\n # the application exits on linux. 
It also means that exceptions will be\n # silently swallowed.\n self.boxes_view_action = QAction(\n \"&Boxes\", self, checkable=True, triggered=partial(self.show_tab, 0),\n )\n self.boxes_view_action.setShortcuts(['ctrl+1', 'ctrl+b'])\n self.metadata_view_action = QAction(\n \"Ob&jects\", self, checkable=True,\n triggered=partial(self.show_tab, 1)\n )\n self.metadata_view_action.setShortcuts(['ctrl+2', 'ctrl+j'])\n\n # FullScreen added in Qt 5.something\n # https://qt.gitorious.org/qt/qtbase-miniak/commit/1ef8a6d\n if not hasattr(QtGui.QKeySequence, 'FullScreen'):\n if 'darwin' == sys.platform:\n KeySequenceFullScreen = 'shift+ctrl+f'\n else:\n KeySequenceFullScreen = 'f11'\n else:\n KeySequenceFullScreen = QtGui.QKeySequence.FullScreen\n self.full_screen_action = QAction(\n \"&Full screen\", self, shortcut=KeySequenceFullScreen,\n triggered=self.toggle_full_screen\n )\n\n self.zoom_in_action = QAction(\n \"Zoom &In\", self, shortcut=QtGui.QKeySequence.ZoomIn,\n triggered=self.zoom_in,\n icon=self.style().standardIcon(QtGui.QStyle.SP_ArrowUp)\n )\n self.zoom_out_action = QAction(\n \"Zoom &Out\", self, shortcut=QtGui.QKeySequence.ZoomOut,\n triggered=self.zoom_out,\n icon=self.style().standardIcon(QtGui.QStyle.SP_ArrowDown)\n )\n self.toogle_zoom_action = QAction(\n \"&Toggle Zoom\", self, shortcut='Z', triggered=self.toggle_zoom\n )\n self.zoom_home_action = QAction(\n \"Fit To Window\", self,\n shortcut=QtGui.QKeySequence.MoveToStartOfDocument,\n triggered=self.zoom_home\n )\n # TODO LH Is F3 (normally meaning 'find next') really the right\n # shortcut for the 'toggle plugin image' action?\n self.toggle_plugin_image_action = QAction(\n \"&Display plugin image\", self, shortcut=\"f3\",\n triggered=self.toggle_plugin_image,\n statusTip=\"Display plugin image\", checkable=True\n )\n\n self.show_object_grid_action = QAction(\n 'Show grid', self, shortcut='ctrl+G', triggered=self.show_grid\n )\n self.show_object_expanded_action = QAction(\n 'Show expanded', self,\n shortcut='ctrl+E', triggered=self.show_expanded\n )\n\n # Colours\n for name in colour_scheme_choice().colour_scheme_names():\n action = QAction(name, self, checkable=True,\n triggered=partial(self.set_colour_scheme, name))\n self.colour_scheme_actions.append(action)\n\n # Help menu\n self.about_action = QAction(\"&About\", self, triggered=self.about)\n\n def create_non_menu_actions(self):\n \"\"\"Creates actions that are not associated with menu items\n \"\"\"\n # Menu-less actions\n # Shortcuts for next / previous tab\n self.previous_tab_action = QAction(\n \"Previous tab\", self, triggered=partial(self.next_previous_tab, False),\n shortcut='ctrl+PgDown'\n )\n self.next_tab_action = QAction(\n \"Next tab\", self, triggered=partial(self.next_previous_tab, True),\n shortcut='ctrl+PgUp'\n )\n\n # Mac also uses these funny shortcuts\n if 'darwin' == sys.platform:\n self.previous_tab_action.setShortcuts(\n ['shift+ctrl+[', self.previous_tab_action.shortcut()]\n )\n self.next_tab_action.setShortcuts(\n ['shift+ctrl+]', self.next_tab_action.shortcut()]\n )\n\n self.addAction(self.previous_tab_action)\n self.addAction(self.next_tab_action)\n\n def create_toolbar(self):\n \"\"\"Creates the toolbar\n \"\"\"\n self.toolbar = self.addToolBar(\"Edit\")\n self.toolbar.addAction(self.open_action)\n self.toolbar.addAction(self.save_action)\n for action in filter(lambda a: a.icon(), self.plugin_actions):\n self.toolbar.addAction(action)\n self.toolbar.addAction(self.zoom_in_action)\n self.toolbar.addAction(self.zoom_out_action)\n 
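# Qt.ToolButtonTextUnderIcon shows each toolbar button's caption beneath\n # its icon, instead of the default icon-only style.\n 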
self.toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)\n self.toolbar.addWidget(self.cookie_cutter_widget)\n\n self.toolbar.addSeparator()\n self.toolbar.addWidget(self.view_selector.widget)\n\n def create_menus(self):\n \"\"\"Create menu items\n \"\"\"\n self._file_menu = QMenu(\"&File\", self)\n self._file_menu.addAction(self.open_action)\n recent = self._file_menu.addMenu('Recent documents')\n for action in self.recent_doc_actions:\n recent.addAction(action)\n self._file_menu.addAction(self.copy_to_new_document_action)\n self._file_menu.addAction(self.save_action)\n self._file_menu.addAction(self.close_action)\n self._file_menu.addSeparator()\n self._file_menu.addAction(self.save_crops_action)\n self._file_menu.addAction(self.export_csv_action)\n self._file_menu.addSeparator()\n self._file_menu.addAction(self.save_screengrab_action)\n self._file_menu.addSeparator()\n self._file_menu.addAction(self.exit_action)\n\n self._edit_menu = QMenu(\"&Edit\", self)\n self._edit_menu.addAction(self.select_all_action)\n self._edit_menu.addAction(self.select_none_action)\n self._edit_menu.addAction(self.delete_action)\n self._edit_menu.addSeparator()\n self._edit_menu.addAction(self.next_box_action)\n self._edit_menu.addAction(self.previous_box_action)\n self._edit_menu.addAction(self.select_by_size_larger_action)\n self._edit_menu.addAction(self.select_by_size_smaller_action)\n self._edit_menu.addSeparator()\n self._edit_menu.addAction(self.rotate_clockwise_action)\n self._edit_menu.addAction(self.rotate_counter_clockwise_action)\n self._edit_menu.addSeparator()\n user_template_popup = self._edit_menu.addMenu('Metadata template')\n self.view_metadata.popup_button.inject_actions(user_template_popup)\n self._edit_menu.addSeparator()\n cookie_cutter_popup = self._edit_menu.addMenu('Cookie cutter')\n self.cookie_cutter_widget.inject_actions(cookie_cutter_popup)\n self._edit_menu.addSeparator()\n for action in self.plugin_actions:\n self._edit_menu.addAction(action)\n for action in (a for a in self.plugin_config_ui_actions if a):\n self._edit_menu.addAction(action)\n\n self._view_menu = QMenu(\"&View\", self)\n self._view_menu.addAction(self.boxes_view_action)\n self._view_menu.addAction(self.metadata_view_action)\n self._view_menu.addSeparator()\n self._view_menu.addAction(self.full_screen_action)\n self._view_menu.addSeparator()\n self._view_menu.addAction(self.zoom_in_action)\n self._view_menu.addAction(self.zoom_out_action)\n self._view_menu.addAction(self.toogle_zoom_action)\n self._view_menu.addAction(self.zoom_home_action)\n self._view_menu.addAction(self.toggle_plugin_image_action)\n self._view_menu.addSeparator()\n self._view_menu.addAction(self.show_object_grid_action)\n self._view_menu.addAction(self.show_object_expanded_action)\n self._view_menu.addSeparator()\n colours_popup = self._view_menu.addMenu('Colour scheme')\n for action in self.colour_scheme_actions:\n colours_popup.addAction(action)\n\n self._help_menu = QMenu(\"&Help\", self)\n self._help_menu.addAction(self.about_action)\n\n self.menuBar().addMenu(self._file_menu)\n self.menuBar().addMenu(self._edit_menu)\n self.menuBar().addMenu(self._view_menu)\n self.menuBar().addMenu(self._help_menu)\n\n @report_to_user\n def show_tab(self, index):\n self.tabs.setCurrentIndex(index)\n\n @report_to_user\n def next_previous_tab(self, next):\n \"\"\"Selects the next (if next is True) or previous (if next is False) tab\n \"\"\"\n select = self.tabs.currentIndex()\n select += 1 if next else -1\n if select == self.tabs.count():\n select = 0\n 
elif select < 0:\n select = self.tabs.count() - 1\n self.tabs.setCurrentIndex(select)\n\n def current_tab_changed(self, index):\n \"\"\"Slot for self.tabs.currentChanged() signal\n \"\"\"\n self.sync_ui()\n\n def selection_changed(self, selected, deselected):\n \"\"\"Slot for self.grid_view.selectionModel().selectionChanged() signal\n \"\"\"\n self.sync_ui()\n\n def colour_scheme_changed(self):\n \"\"\"Slot for COLOUR_SCHEME_CHANGED signal\n \"\"\"\n self.sync_ui()\n\n @report_to_user\n def toggle_full_screen(self):\n \"\"\"Toggles between full screen and normal\n \"\"\"\n if self.isFullScreen():\n self.showNormal()\n\n # When leaving full screen, Qt (or something else) forgets the\n # Mac OS X proxy icon. Clearing and then setting the window file\n # path restores the proxy icon.\n if self.document_path:\n self.setWindowFilePath('')\n self.setWindowFilePath(str(self.document_path))\n else:\n self.showFullScreen()\n\n @report_to_user\n def set_colour_scheme(self, name):\n \"Sets the colour scheme\"\n colour_scheme_choice().set_colour_scheme(name)\n\n def new_cookie_cutter(self):\n \"\"\"Slot for cookie_cutter_changed signal - sets menu and button text\n \"\"\"\n debug_print('MainWindow.new_cookie_cutter')\n self.sync_ui()\n\n @report_to_user\n def save_to_cookie_cutter(self):\n \"Saves bounding boxes to a new 'cookie cutter' file\"\n folder = unicode(cookie_cutter_choice().last_directory())\n path, selectedFilter = QtGui.QFileDialog.getSaveFileName(\n self, \"New cookie cutter\", folder,\n CookieCutterWidget.FILE_FILTER\n )\n\n if path:\n # Save the user's choice\n self.model.to_document(self.document)\n cookie_cutter_choice().create_and_use(\n [tuple(v['rect']) for v in self.document.items],\n path\n )\n\n @report_to_user\n def apply_cookie_cutter(self):\n \"\"\"Replaces existing boxes with those in cookie_cutter_choice.\n \"\"\"\n debug_print('MainWindow.apply_cookie_cutter')\n if self.model.rowCount():\n msg = ('Applying the cookie cutter will cause all boxes and '\n 'metadata to be replaced.\\n\\nContinue and replace all '\n 'existing boxes and metadata?')\n res = QMessageBox.question(self, 'Replace boxes?', msg,\n QMessageBox.No, QMessageBox.Yes)\n else:\n res = QMessageBox.Yes\n\n if QMessageBox.Yes == res:\n self.model.set_new_boxes(\n cookie_cutter_choice().current.document_items\n )\n\n @report_to_user\n def copy_to_new_document(self):\n \"\"\"Prompts the user to choose an image, creates an inselect document\n for the selected image, copies metadata from the currently open\n document to the new document and finally opens the new document\n \"\"\"\n debug_print('MainWindow.copy_to_new_document')\n\n folder = QSettings().value(\n 'working_directory',\n QDesktopServices.storageLocation(QDesktopServices.DocumentsLocation)\n )\n\n path, selectedFilter = QtGui.QFileDialog.getOpenFileName(\n self, \"Open\", folder, self.IMAGE_FILE_FILTER)\n\n # path will be None if user cancelled getOpenFileName\n if path:\n path = Path(path)\n\n # Take a copy of the metadata\n items = self.document.items\n\n if not self.close_document():\n # User does not want to close the existing document\n pass\n else:\n self.new_document(path, default_metadata_items=items)\n\n def _accept_drag_drop(self, event):\n \"\"\"If event refers to a single file that can be opened, returns the path.\n Returns None otherwise.\n \"\"\"\n urls = event.mimeData().urls() if event.mimeData() else None\n path = Path(urls[0].toLocalFile()) if urls and 1 == len(urls) else None\n if path and (InselectDocument.EXTENSION == path.suffix or\n 
IMAGE_SUFFIXES_RE.match(path.name)):\n return urls[0].toLocalFile()\n else:\n return None\n\n def dragEnterEvent(self, event):\n \"\"\"QWidget virtual\n \"\"\"\n debug_print('MainWindow.dragEnterEvent')\n if self._accept_drag_drop(event):\n event.acceptProposedAction()\n else:\n super(MainWindow, self).dragEnterEvent(event)\n\n def dropEvent(self, event):\n \"\"\"QWidget virtual\n \"\"\"\n debug_print('MainWindow.dropEvent')\n res = self._accept_drag_drop(event)\n if res:\n event.acceptProposedAction()\n self.open_file(res)\n else:\n super(MainWindow, self).dropEvent(event)\n\n def write_geometry_settings(self):\n \"Writes geometry to settings\"\n debug_print('MainWindow.write_geometry_settings')\n\n # Taken from http://stackoverflow.com/a/8736705\n # TODO LH Test on multiple display system\n s = QSettings()\n\n s.setValue(\"mainwindow/geometry\", self.saveGeometry())\n s.setValue(\"mainwindow/pos\", self.pos())\n s.setValue(\"mainwindow/size\", self.size())\n\n def show_from_geometry_settings(self):\n debug_print('MainWindow.show_from_geometry_settings')\n\n # TODO LH What if screen resolution, desktop config change or roaming\n # profile means that restored state is outside desktop?\n s = QSettings()\n\n self.restoreGeometry(s.value(\"mainwindow/geometry\", self.saveGeometry()))\n if not (self.isMaximized() or self.isFullScreen()):\n self.move(s.value(\"mainwindow/pos\", self.pos()))\n self.resize(s.value(\"mainwindow/size\", self.size()))\n self.show()\n\n def sync_ui(self):\n \"\"\"Synchronise the user interface with the application state\n \"\"\"\n document = self.document is not None\n has_rows = self.model.rowCount() > 0 if self.model else False\n boxes_view_visible = self.boxes_view == self.tabs.currentWidget()\n objects_view_visible = self.view_object == self.tabs.currentWidget()\n has_selection = len(self.view_object.selectedIndexes()) > 0\n\n # File\n self.copy_to_new_document_action.setEnabled(document)\n self.save_action.setEnabled(document)\n self.save_crops_action.setEnabled(has_rows)\n self.export_csv_action.setEnabled(has_rows)\n self.close_action.setEnabled(document)\n\n # Edit\n self.select_all_action.setEnabled(has_rows)\n self.select_none_action.setEnabled(document)\n self.next_box_action.setEnabled(has_rows)\n self.previous_box_action.setEnabled(has_rows)\n self.select_by_size_larger_action.setEnabled(has_rows)\n self.select_by_size_smaller_action.setEnabled(has_rows)\n self.delete_action.setEnabled(has_selection)\n self.rotate_clockwise_action.setEnabled(has_selection)\n self.rotate_counter_clockwise_action.setEnabled(has_selection)\n self.cookie_cutter_widget.sync_actions(document, has_rows)\n for action in self.plugin_actions:\n action.setEnabled(document)\n\n # View\n self.boxes_view_action.setChecked(boxes_view_visible)\n self.metadata_view_action.setChecked(not boxes_view_visible)\n self.zoom_in_action.setEnabled(document and boxes_view_visible)\n self.zoom_out_action.setEnabled(document and boxes_view_visible)\n self.toogle_zoom_action.setEnabled(document and boxes_view_visible)\n self.zoom_home_action.setEnabled(document and boxes_view_visible)\n self.toggle_plugin_image_action.setEnabled(document and boxes_view_visible)\n self.show_object_grid_action.setEnabled(objects_view_visible)\n self.show_object_expanded_action.setEnabled(objects_view_visible)\n current_colour_scheme = colour_scheme_choice().current['Name']\n for action in self.colour_scheme_actions:\n action.setChecked(current_colour_scheme == 
action.text())\n","sub_path":"inselect/gui/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":54065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"455763806","text":"\nimport sys\nfrom os.path import abspath, dirname\nsys.path.append(dirname(dirname(abspath(__file__))))\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS\nfrom flask_app.modules.model_factory import get_model\n\napp = Flask(__name__)\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n\n@app.route('/predict', methods=['POST', 'GET'])\ndef predict():\n model = get_model(request)\n prediction = model.get_prediction()\n return jsonify(prediction)\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=3039)\n","sub_path":"flask_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"10512328","text":"#!/usr/bin/env python3\n#\n# Copyright 2019 ROBOTIS CO., LTD.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors: Joep Tool\n\nimport os\n\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch import LaunchDescription\nfrom launch.actions import IncludeLaunchDescription\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import LaunchConfiguration\n\n\ndef generate_launch_description():\n launch_file_dir = os.path.join(get_package_share_directory('turtlebot3_gazebo'), 'launch')\n pkg_gazebo_ros = get_package_share_directory('gazebo_ros')\n\n use_sim_time = LaunchConfiguration('use_sim_time', default='true')\n x_pose = LaunchConfiguration('x_pose', default='-2.0')\n y_pose = LaunchConfiguration('y_pose', default='-0.5')\n\n world = os.path.join(\n get_package_share_directory('turtlebot3_gazebo'),\n 'worlds',\n 'turtlebot3_world.world'\n )\n\n gzserver_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')\n ),\n launch_arguments={'world': world}.items()\n )\n\n gzclient_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(pkg_gazebo_ros, 'launch', 'gzclient.launch.py')\n )\n )\n\n robot_state_publisher_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(launch_file_dir, 'robot_state_publisher.launch.py')\n ),\n launch_arguments={'use_sim_time': use_sim_time}.items()\n )\n\n spawn_turtlebot_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(launch_file_dir, 'spawn_turtlebot3.launch.py')\n ),\n launch_arguments={\n 'x_pose': x_pose,\n 'y_pose': y_pose\n }.items()\n )\n\n ld = LaunchDescription()\n\n # Add the commands to the launch description\n ld.add_action(gzserver_cmd)\n ld.add_action(gzclient_cmd)\n ld.add_action(robot_state_publisher_cmd)\n ld.add_action(spawn_turtlebot_cmd)\n\n 
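# Example usage (an assumption, not part of this file; requires the\n    # standard turtlebot3_gazebo install):\n    # ros2 launch turtlebot3_gazebo turtlebot3_world.launch.py x_pose:=0.0 y_pose:=0.5\n    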
return ld\n","sub_path":"launch/turtlebot3_world.launch.py","file_name":"turtlebot3_world.launch.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"45136756","text":"import os\nimport platform\nimport discord\nfrom discord.ext import commands\nfrom yaml import Loader, load\nfrom main import slash\nfrom discord_slash import SlashCommand\nfrom discord_slash import cog_ext\nrootdir=os.path.abspath(os.path.join(os.curdir))\n\nclass info(commands.Cog):\n def __init__(self, bot):\n self.bot=bot\n if platform.system() in [\"Darwin\", 'Windows']:\n with open(f\"{rootdir}/bot/localization/ru/commands.yml\", 'r') as stream:\n self.data = load(stream, Loader=Loader)\n elif platform.system()=='Linux':\n with open(\"bot/localization/ru/commands.yml\", 'r') as stream:\n self.data = load(stream, Loader=Loader)\n @commands.guild_only()\n @cog_ext.cog_slash(name='invite', description='Пригласить бота')\n async def invite(self, ctx):\n full_url = self.data['info.invite.fullurl'].format(self.bot.user.id)\n low_url = self.data['info.invite.lowurl'].format(self.bot.user.id)\n embed = discord.Embed(color=0xf0a302,\n title=self.data['info.invite.embed.title'],\n description=f'{full_url}\\n{low_url}')\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n embed.set_footer(text=f'{ctx.author} | {self.bot.user}', icon_url=ctx.author.avatar_url)\n await ctx.send(embed=embed)\ndef setup(bot):\n bot.add_cog(info(bot))\n","sub_path":"bot/cogs/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"187426546","text":"#! /usr/bin/env python\n\nfrom __future__ import division, print_function\nimport numpy as np\nimport chronostar.traceback as tb\nfrom astropy.table import Table\nimport pickle\nimport matplotlib.pyplot as plt\nimport chronostar.retired.error_ellipse as ee\n\ninit_age = 20\ntraceback_age = 20\ninit_centre = np.array([5, 60, 25, -0.12, -3.48, 1.06])\ninit_pos_disp = 10\ninit_vel_disp = 3 \nperc_error = 0.001\n\nxycorr = 0.0\nxzcorr = 0.0\nyzcorr = 0.0\n\ncorrs = [xycorr, xzcorr, yzcorr]\nstdevs = [init_pos_disp, init_pos_disp, init_pos_disp,\n init_vel_disp, init_vel_disp, init_vel_disp]\n\ncov = np.eye(6)\ncov[np.tril_indices(3,-1)] = corrs\ncov[np.triu_indices(3,1)] = corrs\n\nfor i in range(3):\n cov[:3,i] *= stdevs[:3]\n cov[i,:3] *= stdevs[:3]\n\nfor i in range(3,6):\n cov[3:6,i] *= stdevs[3:]\n cov[i,3:6] *= stdevs[3:]\n\nprint(cov)\n\nnp.random.seed(0)\nnstars = 30 \n\n# generate initial stars from an arbitrary covariance matrix\nxyzuvw_init = np.random.multivariate_normal(\n mean=init_centre, cov=cov, size=nstars\n )\n\nsavefile = \"synt_traceback_{}Myr_{}stars.pkl\".format(\n traceback_age, nstars)\n# Generate the initial xyzuvw data of a group\n# the group is isotropic in position and velocity\n\n# Manual calculation left here for sanity reasons\nnp.random.seed(0)\nif(False):\n random = np.random.normal(size=(nstars,6))\n xyzuvw_init = np.zeros((nstars,6))\n xyzuvw_init[:,:3] = random[:,:3]*init_pos_disp\n xyzuvw_init[:,3:] = random[:,3:]*init_vel_disp\n xyzuvw_init = xyzuvw_init + init_centre\n\n# Project forward in time\nxyzuvw_now = np.zeros((nstars,6))\nfor i in range(nstars):\n xyzuvw_now[i] = tb.trace_forward(xyzuvw_init[i], init_age,\n solarmotion=None)\n\n# Convert to radecpipmrv coordinates:\nsky_coord_now = np.zeros((nstars,6))\nfor i in 
range(nstars):\n sky_coord_now[i] = tb.xyzuvw_to_skycoord(\n xyzuvw_now[i], solarmotion='schoenrich', reverse_x_sign=True\n )\n\n# compile sky coordinates into a table with some form of error\n\nids = np.arange(nstars)\nt = Table(\n [\n ids,\n sky_coord_now[:,0],\n sky_coord_now[:,1],\n sky_coord_now[:,2],\n sky_coord_now[:,2] * perc_error,\n sky_coord_now[:,5],\n sky_coord_now[:,5] * perc_error,\n sky_coord_now[:,3],\n sky_coord_now[:,3] * perc_error,\n sky_coord_now[:,4],\n sky_coord_now[:,4] * perc_error\n ],\n names=('Name', 'RAdeg','DEdeg','Plx','e_Plx','RV','e_RV',\n 'pmRA','e_pmRA','pmDE','e_pmDE')\n )\ntimes = np.linspace(0,30,31)\n\n# perform traceback\ntraceback = tb.traceback(t, times, savefile=\"data/\"+savefile)\nstars, ages, xyzuvw, xyzuvw_cov = pickle.load(open(\"data/\"+savefile, 'r'))\n\n# plot result\n# for each star, plot the traceback curve to its initial age\n# as well as a covariance matrix at it's \"true\" age\nplt.clf()\nfor i in range(nstars):\n x, y = xyzuvw[i,:21,:2].T\n plt.plot(x,y, 'r')\n\n pltcov = xyzuvw_cov[i,20,:2,:2]\n\n ee.plot_cov_ellipse(\n cov=pltcov,\n pos=xyzuvw[i,20,:2],\n nstd=10,\n )\nplt.xlabel(\"X [pc]\")\nplt.ylabel(\"Y [pc]\")\nplt.show()\n","sub_path":"playground/junkpile/synthesise_tb.py","file_name":"synthesise_tb.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"215850111","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nimport bpy\nimport imp \n \ncurve_helper = imp.load_source('curve_helper','curve_helper.py')\nmaterial_helper = imp.load_source('material_helper','material_helper.py')\ngeometry_helper = imp.load_source('geometry_helper','geometry_helper.py')\nhull_maker = imp.load_source('hull_maker','hull_maker.py')\n\nthe_hull=hull_maker.hull_maker(length=11.4,width=3.9,height=3.6)\n\nthe_hull.make_hull_object()\n\nnew_chine=hull_maker.chine_helper(the_hull)\n\nnew_chine.rotation=[180,0,0]\nnew_chine.offset=[0,-0.06,-0.5]\nnew_chine.name=\"top\"\nnew_chine.longitudal_count=1\nnew_chine.longitudal_thickness=0.05\nnew_chine.longitudal_width=-0.15\nnew_chine.make_chine()\n\nnew_chine.rotation=[-39,0,0]\nnew_chine.offset=[0,-0.2,-0.4]\nnew_chine.name=\"mid\"\nnew_chine.make_chine()\n\nnew_chine.longitudal_count=0\nnew_chine.rotation=[45,0,0]\nnew_chine.offset=[0,0,-0.31]\nnew_chine.name=\"upper\"\nnew_chine.make_chine()\n\n\nnew_chine.longitudal_count=1\nnew_chine.rotation=[-79,0,0]\nnew_chine.offset=[0,0,0]\nnew_chine.name=\"low\"\nnew_chine.curve_length=the_hull.hull_length*1.5\nnew_chine.curve_width=1.6\nnew_chine.make_chine()\n\nnew_chine.rotation=[90,0,0]\nnew_chine.offset=[0,0,-0.7]\nnew_chine.name=\"roof\"\nnew_chine.curve_width=0.8\n#new_chine.curve_angle=55\nnew_chine.symmetrical=False\nnew_chine.make_chine()\n\n# ================ modify hull\n\n\n\n# ================ Add Pilot House\n\n\ndef add_pilot_house(the_hull):\n\n\tbpy.ops.mesh.primitive_cube_add(size=2.0, \n\t\t\t\tenter_editmode=False, \n\t\t\t\tlocation=( -0.4,0,-0.3) )\n\n\tbpy.ops.transform.resize(value=(1,1,1))\n\tbpy.ops.object.transform_apply(scale=True,location=False)\n\n\n\tob = bpy.context.active_object\n\n\tob.name=\"Pilot House\"\n\n\tbpy.ops.object.mode_set(mode='EDIT')\n\n\tbpy.ops.mesh.select_all(action='DESELECT')\n\tbpy.ops.object.mode_set(mode='OBJECT')\n\n\tfor face in ob.data.polygons:\n\t\tface.select = geometry_helper.GoingUp( face.normal )\n\n\tbpy.ops.object.mode_set(mode='EDIT')\n\n\tbpy.ops.transform.resize(value=(1, 0.6, 1))\n\n\tbpy.ops.transform.translate(value=(-0.4, 0, 0))\n\n\tbpy.ops.mesh.bevel(offset=0.1)\n\n\tbpy.ops.object.mode_set(mode='OBJECT')\n\n\tbool_new = the_hull.hull_object.modifiers.new(type=\"BOOLEAN\", name=\"hull_join\")\n\tbool_new.object = ob\n\tbool_new.operation = 'UNION'\n\n\tcurve_helper.hide_object(ob)\n\nadd_pilot_house(the_hull)\n\ndef add_window(the_hull):\n\t# ================ Add Window\n\tbpy.ops.mesh.primitive_cube_add(size=1.0, location=( -.7, 0, 0.35) )\n\n\tbpy.ops.transform.resize(value=(2.2,0.5,0.3))\n\tbpy.ops.object.transform_apply(scale=True,location=False)\n\n\tob = bpy.context.active_object\n\tob.name=\"Windows\"\n\n\tbool_new = the_hull.hull_object.modifiers.new(type=\"BOOLEAN\", name=\"hull_cut\")\n\tbool_new.object = ob\n\tbool_new.operation = 'DIFFERENCE'\n\n\tcurve_helper.hide_object(ob)\n\nadd_window(the_hull)\n\ndef add_deck_cockpit(the_hull):\n\t# ================ Deck Cockpit\n\tbpy.ops.mesh.primitive_cube_add(size=1.0, location=( -2.5, 0, 0) )\n\n\tbpy.ops.transform.resize(value=(1.5,1,0.7))\n\tbpy.ops.object.transform_apply(scale=True,location=False)\n\n\tob = bpy.context.active_object\n\tob.name=\"Deck Cockpit\"\n\n\tbool_new = 
the_hull.hull_object.modifiers.new(type=\"BOOLEAN\", name=\"hull_cut\")\n\tbool_new.object = ob\n\tbool_new.operation = 'DIFFERENCE'\n\n\tcurve_helper.hide_object(ob)\n\nadd_deck_cockpit(the_hull)\n\n# ============================================================================================\ndef add_props():\n\tview_collection_props=curve_helper.make_collection(\"props\",bpy.context.scene.collection.children)\n\n\timport_library_path=\"assets/actors.blend/Collection/\"\n\tob = geometry_helper.import_object(import_library_path,\"man.stand\",(0,0.4,-0.88),view_collection_props)\n\tob = geometry_helper.import_object(import_library_path,\"man.lie_down\",(1.05,0,-0.64),view_collection_props)\n\tob = geometry_helper.import_object(import_library_path,\"man.sit_chair\",(-0.35,0,-0.4),view_collection_props)\n\tob = geometry_helper.import_object(import_library_path,\"man.sit_lean\",(-2.1,0.13,-0.87),view_collection_props)\n\n\n\timport_library_path=\"assets/boat_assets.blend/Collection/\"\n\t\n\tob = geometry_helper.import_object(import_library_path,\"mattress.twin\",(2,0,-0.9),view_collection_props)\n\tob = geometry_helper.import_object(import_library_path,\"mattress.twin\",(-2,0,-0.9),view_collection_props)\n\n\tob = geometry_helper.import_object(import_library_path,\"rope_coils_2_high\",(4.7,0,-0.7),view_collection_props)\n\tob = geometry_helper.import_object(import_library_path,\"yahama_gm_30hp\",(-2.4,0,-1.1),view_collection_props)\n\n\n\tob = geometry_helper.import_object(import_library_path,\"chair.reading_sitting_up_full\",(-0.7,0,-0.2),view_collection_props)\n\n\tob = geometry_helper.import_object(import_library_path,\"anchor\",(5.9,0.15,-0.25),view_collection_props)\n\tob = geometry_helper.import_object(import_library_path,\"anchor\",(5.9,-0.15,-0.25),view_collection_props)\n\n\n\tob = geometry_helper.import_object(import_library_path,\"tank_fuel_5gal\",(-0.60,0.65,-1.15),view_collection_props,rotation=(-90,0,90))\n\tob = geometry_helper.import_object(import_library_path,\"tank_fuel_5gal\",(-0.85,0.65,-1.15),view_collection_props,rotation=(-90,0,90))\n\tob = geometry_helper.import_object(import_library_path,\"tank_fuel_5gal\",(-0.35,0.65,-1.15),view_collection_props,rotation=(-90,0,90))\n\n\n\tob = geometry_helper.import_object(import_library_path,\"battery\",(0.6,0.17,-1.15),view_collection_props,rotation=(0,0,0))\n\n\nclean_distance=0.33\nx_locations=[\t-the_hull.hull_length/2+clean_distance,\n\t\t\t\tthe_hull.hull_length/2-clean_distance]\n\nthe_hull.cleanup_longitudal_ends(x_locations)\n\nlevels=[ -0.9,-0.5 ]\n\n# X station position\n# Vertical height adjust (or FALSE for no vertical height adjustment)\n# Cutout void in middle (False for watertight bulkhead)\n\nbulkhead_definitions = [ \n\t\n\t\t\t\t\t\t(0,levels[0],False),\n\t\t\t\t\t\t(1,levels[0],False),\n\t\t\t\t\t\t(-1,levels[0],False),\n\t\t\t\t\t\t(-2,levels[0],False),\n\t\t\t\t\t\t(2,levels[0],False),\n\n\t\t\t\t\t\t(3,levels[1],False),\n\t\t\t\t\t\t(-3,levels[1],False),\n\t\t\t\t\t\t(4,levels[1],True),\n\t\t\t\t\t\t(-4,levels[1],True),\n\n\t\t\t\t\t\t(5,False,False),\n\t\t\t\t\t\t(-5,False,False)\n]\n\n\nthe_hull.make_bulkheads(bulkhead_definitions)\nthe_hull.make_longitudal_booleans()\n\n\t\t\t\n#the_hull.hull_object.hide_set(True)\n#the_hull.hull_object.hide_render=True\n\n\n","sub_path":"tests/hull_test_11_4.py","file_name":"hull_test_11_4.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"543271559","text":"import 
sys\n\ndef count(n):\n countlist = [0, 0, 1, 1]\n if 1<=n and n<=3:\n return countlist[n]\n else:\n for i in range(4,n+1):\n min_num = countlist[i-1]+1\n if i%3 ==0:\n tmp = countlist[int(i/3)]+1\n min_num = min(min_num,tmp)\n elif i%2 ==0:\n tmp = countlist[int(i/2)] + 1\n min_num = min(min_num, tmp)\n countlist.append(min_num)\n return countlist[i]\n\na=int(sys.stdin.readline())\nprint(count(a))","sub_path":"BOJ/1000/1463.py","file_name":"1463.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"626029844","text":"# 06/27/2019\n# import numpy as np\n\n# This is for \"type hints\". Totally optional and Python doesn't actually enforce\n# that if you say a function takes in a List it actually does, but I find they're\n# helpful for documentation purposes\n# The base \"primitive\" types like str, int, float, bool don't need to be imported.\n# List, Dict, Set, Tuple, etc. all need to be imported, however.\n# https://docs.python.org/3/library/typing.html\nfrom typing import List\n\n# INPUT MATRIX DIMENSIONS\n#print(\"matrix m x n. input dimensions:\")\n\n# checking for n x n dimensions\n# dim_'x' = dimension in type 'x'\n\"\"\"\ndim_str = input(\"\")\ndim_list = dim_str.split(\" \")\nvalid = False\n\"\"\"\n\n# I have no idea what I'm doing lines 14-29\n\"\"\"\nwhile not valid:\n try:\n dim_list[0]\n valid = True\n except IndexError:\n print(\"Invalid input. Reenter dimensions.\")\n dim_str = input(\"\")\n\nwhile dim_list[0] != dim_list[2]:\n print(\"Invalid matrix. Accept only matrices of n x n.\")\n dim_str = input(\"\")\n dim_list = dim_str.split(\" \")\nelse:\n pass\n\"\"\"\n\n# dim = int(dim_list[0])\n\n\n# INPUT MATRIX VALUES\n# mat_row = list of one row in matrix\n# mat_val = list of 'mat_row'\n\n# \"Helper functions\" are functions that are only used\n# internally inside the files they're defined and not meant\n# to be used or exposed by the user. When you want to define a\n# helper function, it's good practice to pre-pend its name with\n# double underscores (__) to help hide it from the user.\ndef __is_str_numeric(string: str) -> bool:\n \"\"\"\n Helper function that checks whether string is integer or float.\n\n Parameters\n ----------\n string\n\n Returns\n -------\n bool\n \"\"\"\n try:\n float(string)\n except ValueError:\n return False\n return True\n\n\ndef input_matrix() -> List[List[float]]:\n \"\"\"\n Takes in user input and returns defined matrix.\n\n Returns\n -------\n List[List[float]]: user-defined matrix of floats\n \"\"\"\n\n num_rows = input(\"Input the number of rows: \")\n # The \"isdigit()\" string method checks whether a\n # string is a positive integer. We then check\n # whether the input is equal to 0 as well.\n while not num_rows.isdigit() or not int(num_rows):\n num_rows = input(\"Invalid row input. Try again: \")\n num_rows = int(num_rows)\n\n num_cols = input(\"Input the number of cols: \")\n while not num_cols.isdigit() or not int(num_cols):\n num_cols = input(\"Invalid col input. Try again: \")\n num_cols = int(num_cols)\n\n matrix = []\n\n for row_num in range(1, num_rows+1):\n input_row = input(f\"Row {row_num}: \")\n while True: # Runs forever until broken out of\n # Check each entry is either an integer or a float\n # parsed_row then becomes a list of boolean (True/False) values\n parsed_row = [__is_str_numeric(entry) for entry in input_row.split(\",\")]\n # Check dimensions match\n if len(parsed_row) != num_cols:\n print(f\"Expected {num_cols} entries. 
Got {len(parsed_row)}. Try again.\")\n input_row = input(f\"Row {row_num}: \")\n # parsed_row is now a list of booleans. The all() function checks\n # if an iterable is all True. In this case, it's really checking that\n # all entries in input_row are all either an int or a float\n # https://docs.python.org/3/library/functions.html#all\n elif not all(parsed_row):\n print(f\"Please only enter integers or floats. Try again.\")\n input_row = input(f\"Row {row_num}: \")\n else:\n # Break out of the while-loop because the condition\n # is satisfied\n validated_row = [float(entry) for entry in input_row.split(\",\")]\n matrix.append(validated_row)\n break\n\n print(\"Generated matrix:\")\n for row in matrix:\n print(row)\n\n return matrix\n\n\ndef row_input():\n row_number = 1\n\n print(\"Input matrix row by row\\n\"\n \"Separate terms by commas\\n\"\n \"Press enter after each row\\n\")\n mat_vals = \"\"\n for a in range(dim):\n mat_row = input(f\"row {row_number}: \")\n compatible = False\n while not compatible:\n try:\n row = [float(num) for num in mat_row.split(\",\")]\n\n # checking for dimension fit\n if len(row) != dim:\n print(\"Format is not compatible. Re-enter row.\")\n mat_row = input(f\"row {row_number}: \")\n # \"elif\" is a contraction for \"else if\"\n # It's handy for avoiding deep layers of indentation\n elif row_number != 1:\n # applies only after first input\n mat_vals += ','\n else:\n pass\n row_number += 1\n mat_vals += mat_row\n compatible = True\n\n # checking for format fit\n except ValueError:\n print(\"Format is not compatible. Re-enter row.\")\n mat_row = input(f\"row {row_number}: \")\n return mat_vals\n\n\n\"\"\"\n# CONVERTING SEPARATED ROW INPUTS TO ONE MATRIX\n# num_'x' = entered numbers in type 'x'\nnum_str = row_input()\nnum_list = [float(num) for num in num_str.split(\",\")]\nnum_mat = [num_list[i * dim:(i + 1) * dim] for i in range((dim ** 2 + dim - 1) // dim)]\nA = matrix_A = np.array(num_mat)\n\"\"\"\n\n\ndef diag_mat(matrix):\n \"\"\"\n ROW REDUCING (rrf) TO A DIAGONAL MATRIX\n a = row being reduced to zero\n b = row remaining in the diagonal\n\n Parameters\n ----------\n matrix\n\n Returns\n -------\n\n \"\"\"\n for b in range(len(matrix)):\n for a in range(len(matrix[0])):\n if a <= b:\n # only rows below the diagonal need to be reduced, so skip\n # this row (\"pass\" would fall through and reduce it anyway)\n continue\n\n # in Python, 0 is a \"falsey\" value (https://docs.python.org/2.4/lib/truth.html)\n # therefore, checking whether something is equivalent to zero is the same as\n # checking whether it's \"false\". 
It's convention to use \"not (variable)\" instead\n # of \"(variable) == 0\"\n if not matrix[b][b]:\n # avoiding ZeroDivisionError: a zero pivot cannot be used to\n # eliminate anything, so move on to the next row\n continue\n\n # scale row b so its pivot cancels matrix[a][b], then subtract it\n # element-wise (rows are plain lists, so use a comprehension\n # rather than numpy-style arithmetic)\n factor = matrix[a][b] / matrix[b][b]\n matrix[a] = [x - factor * y for x, y in zip(matrix[a], matrix[b])]\n # \"\\n\" is special text formatting code meaning \"new line\"\n print(f\"Diagonal matrix:\\n{matrix}\")\n\n\ndef multiply_diag(matrix):\n \"\"\"\n FINDING THE DETERMINANT BY MULTIPLYING DIAGONAL\n l = number of values in the diagonal\n m = diagonal position (A[m][m] = diagonal value)\n n = multiplied diagonal values compiled: determinant\n\n Parameters\n ----------\n matrix\n\n Returns\n -------\n\n \"\"\"\n n = matrix[0][0]\n for l in range(len(matrix)-1):\n m = l + 1\n n *= float(matrix[m][m])\n # undoing negative zero answers\n if n == -0:\n n = abs(n)\n print(f\"determinant of matrix: {n}\")\n return n\n\n\n# LINEAR INDEPENDENCE\ndef is_linearly_independent(matrix: List[List[float]]) -> bool:\n \"\"\"\n Parameters\n ----------\n matrix: List[List[float]]\n 2D matrix of floats\n\n Returns\n -------\n bool: Whether matrix is linearly independent\n \"\"\"\n\n # row reduce first so the product of the diagonal really is the\n # determinant (note: diag_mat modifies matrix in place)\n diag_mat(matrix)\n determinant = multiply_diag(matrix)\n if not determinant:\n print(\"Matrix is linearly dependent.\")\n return False\n # Don't need to explicitly write \"else\" here\n print(\"Matrix is linearly independent.\")\n return True\n\n\ndef main():\n matrix = input_matrix()\n # print(is_linearly_independent(matrix))\n # print(multiply_diag(matrix))\n\n\nif __name__ == \"__main__\":\n # If you click the green arrow to the left of this\n # and select \"run\" from the dropdown it will run\n # the \"main\" function above\n\n # This tells the computer where to \"enter\" the program\n # if you ran \"python3 determinant-calculator.py\" through a terminal\n # it would enter this if statement and then execute the main() function.\n # It's a good idea to use the main statement to control the execution of your\n # program/code. Without it, the code will simply execute top to bottom\n # which can cause some headaches and errors. 
By splitting things up into\n # functions we can control the order and circumstance of execution.\n main()\n","sub_path":"determinant-calculator.py","file_name":"determinant-calculator.py","file_ext":"py","file_size_in_byte":8390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"291074185","text":"from auth.auth import Auth\nfrom feature.features import Features\nfrom mode.mode import Mode\nfrom config.config import Config\n\n\nclass Main(object):\n def __init__(self):\n self.ip = '192.168.220.49'\n self.cookieId = None\n\n def main(self):\n auth = Auth(self.ip)\n self.cookieId = auth.login('nsroot', 'nsroot')\n\n config = Config('192.168.220.49', self.cookieId, 'nsRest', '192.168.50', '255.255.255.0', 'GTM-03:00-BRT-America/Sao_paulo')\n config.initialConf()\n\n # feature = Features(self.ip, self.cookieId)\n # feature.enableAppFirewall()\n\n # mode = Mode(self.ip, self.cookieId)\n # mode.enableULFD()\n\n auth.logoff(self.cookieId)\n\n\nif __name__ == '__main__':\n Main().main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"490390798","text":"from django.shortcuts import render\n\nfrom pyconbalkan.conference.models import Conference, CountDown\nfrom pyconbalkan.organizers.models import Volunteer\nfrom pyconbalkan.speaker.models import Speaker\n\n\ndef home(request):\n conference = Conference.objects.filter(active=True)\n count_down = CountDown.objects.filter(active=True)\n speakers = Speaker.objects.filter(active=True)\n context = {\n 'speakers': speakers,\n 'conference': conference.first() if conference else None,\n 'count_down': count_down.first() if count_down else None,\n }\n return render(request, 'home.html', context)\n\n\ndef organizers(request):\n volunteers = Volunteer.objects.filter(type=Volunteer.VOLUNTEER, active=True)\n organizers = Volunteer.objects.filter(type=Volunteer.ORGANIZER, active=True)\n conference = Conference.objects.filter(active=True)\n context = {\n 'volunteers': volunteers,\n 'organizers': organizers,\n 'conference': conference.first() if conference else None,\n }\n return render(request, 'organizers.html', context)\n","sub_path":"pyconbalkan/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"371823040","text":"def ReadAFile(FileName):\n 'Prints number of characters in file.'\n InF = open(FileName, 'r')\n text= InF.read()\n edit = text.split()\n print(len(edit))\n InF.close()\nReadAFile('Odoom_Jason.txt')\nReadAFile(input('Please enter a filename: \\n'))\nprint('Thank you for your input')\n","sub_path":"Notepad/characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"531980228","text":"import socket\nimport datetime\nimport logging\nfrom flask import Flask\n\napplication = Flask(__name__)\n\nlog = logging.getLogger(\"mushroom-generator\")\n\n\ndef get_logfile():\n return \"/mnt/important_logs.txt\"\n\n\ndef read_log():\n try:\n write_log()\n\n try:\n log_file = open(get_logfile(), \"r\")\n except IOError:\n log_file = open(get_logfile(), \"w\")\n\n log_data = log_file.read()\n log_file.close()\n\n log.info(\"Log reading successful\")\n \n return log_data\n\n except Exception as e:\n log.warning(\"Log 
reading failed: \" + str(e))\n \n return str(e)\n\n\ndef write_log():\n try:\n try:\n log_file = open(get_logfile(), \"a\")\n except IOError:\n log_file = open(get_logfile(), \"w\")\n\n\n log_line = \"[ Hostname: \" + str(socket.gethostname()) + \" Datetime: \" + str(datetime.datetime.now()) + \" ]\\n\"\n log_file.write(log_line)\n\n log_file.close()\n\n log.info(\"Logging successful\")\n except Exception as e:\n log.warning(\"Logging failed: \" + str(e))\n\n\n\n@application.route(\"/\")\ndef hello():\n showed_output = \"
Hello World! Greetings from \" + socket.gethostname() + \"\\n\\n\" + read_log()\n \n return showed_output\n\n\nif __name__ == \"__main__\":\n write_log()\n application.run()\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"458656621","text":"from collections import namedtuple\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select, WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import (\n NoSuchElementException, WebDriverException,\n TimeoutException, StaleElementReferenceException\n)\nimport datetime\nimport time\nimport shutil\nimport os\nfrom enum import Enum, unique\nfrom banalcow import banalutil\nfrom banalcow.driver import BanalDriver\n\n\nclass NetbankError(Exception):\n pass\n\n\nclass FileNotFoundError(OSError):\n pass\n\n@unique\nclass AccountType(Enum):\n HOME_LOAN = 1\n COMPLETE_ACCESS = 2\n CREDIT_CARD = 3\n MISA = 4\n\n\nclass Netbank:\n login_url = \"https://www.my.commbank.com.au/netbank/Logon/Logon.aspx\"\n date_fmt = \"%d/%m/%Y\"\n\n def __init__(self, username, password, sleep, retry, **kwargs):\n self.username = username\n self.password = password\n self.sleep = sleep\n self.retry = retry\n self.from_date = kwargs.get('from_date')\n self.to_date = kwargs.get('to_date')\n self.proxy = kwargs.get('proxy')\n self.chrome_driver_executable_path = kwargs.get(\n 'chrome_driver_executable_path'\n )\n self.only_home_loans = kwargs.get('only_home_loans', False)\n self.debug = kwargs.get('debug', False)\n\n\n if self.__from_date > self.__to_date:\n raise NetbankError(\n \"{0} is greater than {1}\".\n format(self.__from_date, self.__to_date)\n )\n\n self.bd = BanalDriver(\n chrome_driver_executable_path=self.chrome_driver_executable_path,\n proxy=self.proxy,\n )\n self.driver = self.bd.driver\n\n @property\n def today(self):\n return datetime.datetime.now()\n\n @property\n def last_year(self):\n return banalutil.pastdt()\n\n @property\n def from_date(self):\n return self.__from_date\n\n @from_date.setter\n def from_date(self, from_date):\n if from_date is None:\n self.__from_date = self.last_year\n else:\n try:\n self.__from_date = datetime.datetime.strptime(\n from_date, self.date_fmt\n )\n except ValueError:\n raise NetbankError(\n \"{0} does not match {1}\".format(from_date, self.date_fmt)\n )\n\n @property\n def to_date(self):\n return self.__to_date\n\n @to_date.setter\n def to_date(self, to_date):\n if to_date is None:\n self.__to_date = self.today\n else:\n try:\n self.__to_date = datetime.datetime.strptime(\n to_date, self.date_fmt\n )\n except ValueError:\n raise NetbankError(\n \"{0} does not match {1}\".format(to_date, self.date_fmt)\n )\n\n def login(self):\n self.driver.get(self.login_url)\n user_field = self.driver.find_element_by_id('txtMyClientNumber_field')\n password_field = self.driver.find_element_by_id('txtMyPassword_field')\n submit_button = self.driver.find_element_by_id('btnLogon_field')\n user_field.send_keys(self.username)\n password_field.send_keys(self.password)\n submit_button.click()\n self.homepage = self.driver.current_url\n\n def get_accounts(self):\n \"\"\"Account information in the form of a dict of tuples.\"\"\"\n\n # Use WebDriverWait to wait for the presence of the portfolio table.\n # This was taking a bit too long to render sometimes and selenium would\n # throw an exception not being able to find the element in time.\n 
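# WebDriverWait(driver, timeout).until(condition) polls the condition until\n # it returns a truthy value or the timeout (self.sleep seconds here)\n # elapses, at which point a TimeoutException is raised.\n 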
WebDriverWait(self.driver, self.sleep).until(\n EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"StartMainContent\"]/div/div[2]/div[1]/main/section[1]/div/div[1]')\n )\n )\n\n account = namedtuple(\n 'account',\n 'name balance available href filename account_type'\n )\n\n accounts = {}\n count = 1\n while True:\n account_type = None\n\n try:\n self.driver.find_element_by_xpath('//*[@id=\"StartMainContent\"]/div/div[2]/div[1]/main/section[1]/div/div[1]/div[{0}]'.format(count))\n except NoSuchElementException:\n break\n\n name = self.driver.find_element_by_xpath('//*[@id=\"StartMainContent\"]/div/div[2]/div[1]/main/section[1]/div/div[1]/div[{0}]/div/div[1]/div[2]/div/div/div/a/h3'.format(count))\n\n if name.get_attribute('title').lower() == 'commsec shares':\n count += 1\n continue\n\n if name.get_attribute('title').lower() == 'youthsaver':\n count += 1\n continue\n\n if name.get_attribute('title').lower() == 'home loan':\n account_type = AccountType.HOME_LOAN\n elif name.get_attribute('title').lower() == 'complete access':\n account_type = AccountType.COMPLETE_ACCESS\n elif name.get_attribute('title').lower() == 'mastercard platinum':\n account_type = AccountType.CREDIT_CARD\n elif name.get_attribute('title').lower() == 'misa':\n account_type = AccountType.MISA\n\n if self.only_home_loans and account_type != AccountType.HOME_LOAN:\n count += 1\n continue\n\n number = self.driver.find_element_by_xpath('//*[@id=\"StartMainContent\"]/div/div[2]/div[1]/main/section[1]/div/div[1]/div[{0}]/div/div[1]/div[2]/div/div/span/div'.format(count))\n balance = self.driver.find_element_by_xpath('//*[@id=\"StartMainContent\"]/div/div[2]/div[1]/main/section[1]/div/div[1]/div[{0}]/div/div[1]/div[2]/div/ul/li[1]/span[2]'.format(count))\n available = self.driver.find_element_by_xpath('//*[@id=\"StartMainContent\"]/div/div[2]/div[1]/main/section[1]/div/div[1]/div[{0}]/div/div[1]/div[2]/div/ul/li[2]/span[2]'.format(count))\n href = self.driver.find_element_by_xpath('//*[@id=\"StartMainContent\"]/div/div[2]/div[1]/main/section[1]/div/div[1]/div[{0}]/div/div[1]/div[2]/div/div/div/a'.format(count))\n\n # Remove all non-digits from account number\n accountnumber = ''.join(filter(str.isdigit, number.text))\n\n if accounts.get(accountnumber):\n count += 1\n continue\n\n filename = banalutil.filename(\n accountnumber, self.from_date, self.to_date\n )\n\n if self.debug:\n print(\n \"DEBUG1: {0},{1},{2},{3},{4},{5},{6}\".format(\n name.get_attribute('title'),\n accountnumber,\n balance.get_attribute('title'),\n available.get_attribute('title'),\n href.get_attribute('href'),\n filename,\n account_type\n )\n )\n\n accounts[accountnumber] = account(\n name=name.get_attribute('title'),\n balance=balance.get_attribute('title'),\n available=available.get_attribute('title'),\n href=href.get_attribute('href'),\n filename=filename,\n account_type=account_type\n )\n\n count += 1\n\n return accounts\n\n def logout(self):\n attempts = 0\n while attempts < self.retry:\n attempts += 1\n try:\n logout = WebDriverWait(self.driver, self.sleep).until(\n EC.presence_of_element_located(\n (By.XPATH, '//*[@id=header]/div[2]/nav/div[1]/ul/li[3]')\n )\n )\n except TimeoutException:\n break\n\n try:\n logout.click()\n except StaleElementReferenceException:\n pass\n else:\n break\n\n def access_homepage(self):\n self.driver.get(self.homepage)\n\n def access_account(self, accountnumber, href):\n self.driver.get(href)\n\n def view_transactions(self):\n \"\"\" Click the view transctions link within the Home Loan\n accounts page\n\n Non Home 
Loan accounts dont have this link.\n \"\"\"\n attempts = 0\n while (attempts < self.retry):\n attempts += 1\n try:\n search_elem = WebDriverWait(self.driver, self.sleep).until(\n EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"lnk-transactions-viewAll\"]')\n\n )\n )\n except TimeoutException:\n break\n\n try:\n search_elem.click()\n except StaleElementReferenceException:\n pass\n else:\n break\n\n def download_ofx(self, filename, account_type):\n attempts = 0\n while (attempts < self.retry):\n attempts += 1\n try:\n element_xpath = '//*[@id=\"ctl00_CustomFooterContentPlaceHolder_updatePanelExport1\"]/div'\n if account_type == AccountType.COMPLETE_ACCESS:\n element_xpath = '//*[@id=\"export-link\"]'\n export_elem = WebDriverWait(self.driver, self.sleep).until(\n EC.presence_of_element_located(\n (\n By.XPATH,\n element_xpath\n )\n )\n )\n except (\n NoSuchElementException,\n WebDriverException,\n TimeoutException\n ) as e:\n if self.debug:\n print(e)\n\n if attempts == self.retry:\n if not self.debug:\n session.logout()\n self.driver.quit()\n raise NetbankError(\"Unable to find export element\")\n else:\n export_elem.click()\n break\n\n \"\"\"\n try:\n export_type_elem = Select(\n self.driver.find_element_by_xpath(\n '//*[@id=\"ctl00_CustomFooterContentPlaceHolder_ddlExportType1_field\"]'\n\n )\n )\n except (NoSuchElementException, WebDriverException) as e:\n print(e)\n else:\n export_type_elem.select_by_value('OFX')\n \"\"\"\n\n try:\n if account_type == AccountType.COMPLETE_ACCESS:\n export_type_elem = self.driver.find_element_by_xpath(\n '//*[@id=\"export-format-type\"]/div/div[2]/label'\n )\n else:\n export_type_elem = Select(\n self.driver.find_element_by_xpath(\n '//*[@id=\"ctl00_CustomFooterContentPlaceHolder_ddlExportType1_field\"]'\n\n )\n )\n except (NoSuchElementException, WebDriverException) as e:\n print(e)\n else:\n if account_type == AccountType.COMPLETE_ACCESS:\n export_type_elem.click()\n else:\n export_type_elem.select_by_value('OFX')\n\n\n try:\n submit_button_xpath = '//*[@id=\"ctl00_CustomFooterContentPlaceHolder_lbExport1\"]'\n if account_type == AccountType.COMPLETE_ACCESS:\n submit_button_xpath = '//*[@id=\"txnListExport-submit-btn\"]'\n submit_button = self.driver.find_element_by_xpath(submit_button_xpath)\n except (NoSuchElementException, WebDriverException) as e:\n print(e)\n else:\n submit_button.click()\n\n ofxdata_filename = 'OFXData.ofx'\n count = 1\n while not os.path.exists(ofxdata_filename):\n time.sleep(1)\n if count > self.retry:\n raise NetbankError(\n \"Unable to find file {0}\".\n format(filename)\n )\n count += 1\n shutil.move(ofxdata_filename, filename)\n","sub_path":"banalcow/netbank.py","file_name":"netbank.py","file_ext":"py","file_size_in_byte":12225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"9513858","text":"# noinspection PyUnresolvedReferences\nfrom shovel import task\nimport subprocess\nimport os\n\n@task\ndef deploy():\n #command to push file to sauce storage\n print(\"Publishing APK file to Sauce Storage\")\n command = ['curl',\n '-u',\n \"{sauce_user}:{sauce_key}\".format(sauce_user=os.environ['SAUCE_USER'],sauce_key=os.environ['SAUCE_KEY']), #auth info\n '-X',\n 'POST',\n '-H',\n '\"Content-Type: application/octet-stream\"',\n \"https://saucelabs.com/rest/v1/storage/{user}/app-debug.apk?overwrite=true\".format(user=os.environ['SAUCE_USER']), #sauce storage location\n '--data-binary',\n '@staging/app-debug.apk'] # file to upload location\n print(' '.join(command))\n 
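The Netbank class above repeats one idiom in get_accounts, logout, view_transactions and download_ofx: wait for an element with WebDriverWait, click it, and retry on StaleElementReferenceException up to self.retry times. Below is a minimal sketch of that idiom factored into a standalone helper; the name retry_click and its parameters are illustrative, not identifiers from the original module.

# Sketch only: the wait/retry/click idiom from the Netbank class above,
# factored into one reusable function. `retry_click` is a hypothetical
# name; driver/xpath/timeout/attempts mirror self.driver/self.sleep/self.retry.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import (
    TimeoutException, StaleElementReferenceException
)

def retry_click(driver, xpath, timeout, attempts):
    """Wait for the element at `xpath` and click it, retrying stale lookups."""
    for _ in range(attempts):
        try:
            elem = WebDriverWait(driver, timeout).until(
                EC.presence_of_element_located((By.XPATH, xpath))
            )
            elem.click()
            return True
        except TimeoutException:
            return False  # element never rendered; no point retrying
        except StaleElementReferenceException:
            continue      # page re-rendered between lookup and click; retry
    return False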
result = subprocess.call(' '.join(command), shell=True)\n if result != 0:\n print(\"Publishing to Sauce Storage Failed.\")\n exit(1)\n\n","sub_path":"shovel/sauce.py","file_name":"sauce.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"149953248","text":"from flask import Flask, render_template, session, request\r\nfrom random import randint\r\nfrom champions import champions\r\nimport json\r\n\r\napp = Flask(__name__)\r\napp.secret_key = 'testtest'\r\n\r\n@app.route('/', methods=['GET','POST'])\r\ndef index():\r\n\r\n faction = request.form.getlist('faction')\r\n ad_ap = request.form.getlist('ad-ap')\r\n role = request.form.getlist('role')\r\n champ_filter = session['formdata']\r\n champ_list = []\r\n\r\n for champ in champions:\r\n # Checking filters\r\n if len([i for i in champ['ad-ap'] if i in ad_ap]) > 0 \\\r\n and champ['faction'] in faction \\\r\n and champ['name'] in champ_filter \\\r\n and len([j for j in champ['role'] if j in role]) > 0:\r\n champ_list += [champ['name']]\r\n\r\n # If there are zero champion possibilities, output none\r\n try:\r\n chosen_champ = champ_list[randint(0, len(champ_list)-1)]\r\n except:\r\n chosen_champ = 'None'\r\n\r\n return render_template('index.html',\r\n chosen_champ=chosen_champ,\r\n faction=faction,\r\n ad_ap=ad_ap,\r\n champ_filt=champ_filter)\r\n\r\n# Champ select page\r\n@app.route('/champselect/', methods=['GET','POST'])\r\ndef champselect():\r\n\r\n # Read list of all champions from a text file\r\n f = open('listofchamps.txt', 'r')\r\n champlist = []\r\n temp = f.readlines()[::2]\r\n for champ in temp:\r\n champlist += [champ.replace('\\n', '')]\r\n\r\n if request.method == 'POST':\r\n cf = request.json\r\n session['formdata'] = cf\r\n if 'formdata' in session:\r\n return json.dumps(session['formdata'])\r\n return render_template('champ_select.html', champlist=champlist)\r\n\r\n# About page\r\n@app.route('/about/')\r\ndef about():\r\n return render_template('about.html')\r\n\r\n@app.route('/contact/')\r\ndef contact():\r\n return render_template('contact.html')\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n app.config['SESSION_TYPE'] = 'filesystem'","sub_path":"champ_select/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"288379567","text":"import socket\nimport sys\nimport time\nimport json\nimport re\nimport datetime\nimport sqlite3\nimport json\n\ntrajectories = []\n\ndef get_json(query):\n #try:\n elems = query.split(\"/\")\n if len(elems) == 3:\n #####################\n ### SINGLE POINTS ###\n #####################\n \n gap = int(elems[0])\n da = datetime.date(2015, 1, 5) + datetime.timedelta(days = gap) # first day of the dataset + query offset\n y = da.year\n m = da.month\n d = da.day\n h = int(elems[1])\n mi = int(elems[2])\n \n conn = sqlite3.connect(\"../database/altran.db\")\n c = conn.cursor()\n \n data = []\n for row in c.execute((\"SELECT lat, lng, signal_avg \" +\n \"FROM altran \" +\n \"WHERE year = \" + str(y) +\n \" AND month = \" + str(m) +\n \" AND day = \" + str(d) +\n \" AND hours = \" + str(h) +\n \" AND minutes = \" + str(mi))):\n lat = row[0]\n lng = row[1]\n sig = row[2]\n point = {'lat': lat, 'lng': lng, 'signal_avg': sig}\n data.append(point)\n\n conn.close()\n \n return json.dumps({'coordinates': data})\n else:\n ####################\n ### TRAJECTORIES ###\n 
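The deploy() task above shells out to curl through subprocess. The same upload can be expressed directly with the requests library; this is an alternative sketch, not the original script's approach. The endpoint, SAUCE_USER/SAUCE_KEY environment variables and file path are taken from the command list above.

# Sketch only: the curl invocation in deploy() rewritten with `requests`.
import os
import requests

def deploy_with_requests(apk_path='staging/app-debug.apk'):
    user = os.environ['SAUCE_USER']
    key = os.environ['SAUCE_KEY']
    url = ('https://saucelabs.com/rest/v1/storage/'
           '{user}/app-debug.apk?overwrite=true'.format(user=user))
    with open(apk_path, 'rb') as apk:
        resp = requests.post(
            url,
            auth=(user, key),  # equivalent of curl -u user:key
            headers={'Content-Type': 'application/octet-stream'},
            data=apk,          # equivalent of --data-binary @file
        )
    resp.raise_for_status()    # raise instead of checking an exit code
    return resp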
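The champ_select Flask app above expects a JSON champion filter to be POSTed to /champselect/ (stored in the session) before the index route filters on it. A hedged client sketch follows: the host/port and champion names are placeholders, and a requests.Session is used so the Flask session cookie persists between calls.

# Sketch only: driving the champ_select Flask app above from a client.
# localhost:5000 and the champion names are placeholders.
import requests

client = requests.Session()

# Store a champion filter in the server-side session ('formdata').
client.post('http://localhost:5000/champselect/', json=['Ahri', 'Zed'])

# Roll a champion constrained by faction/damage-type/role form fields.
page = client.post(
    'http://localhost:5000/',
    data={'faction': 'Ionia', 'ad-ap': 'AP', 'role': 'Mid'},
)
print(page.status_code)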
####################\n print(trajectories[int(query)])\n return json.dumps({'coordinates': trajectories[int(query)]})\n #except:\n # return \"{\\\"state\\\": -1}\"\n\n\ndef read_trajectories():\n k = 0\n with open(\"../trajectories.data\") as f:\n lines = f.readlines()\n for line in lines:\n line = line[:-1]\n for seg in line.split(\";\"):\n trajectory = []\n for segseg in seg.split(\",\"):\n position = {}\n segsegseg = segseg.split(\"#\")\n position['lat'] = float(segsegseg[0])\n position['lng'] = float(segsegseg[1])\n trajectory.append(position)\n trajectories.append(trajectory)\n k = k + 1\n print(\"num trajectories: \" + str(k))\n \n\ndef serve(port):\n # Create communication socket and listen on port.\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server.bind((socket.gethostname(), port))\n print(\"Started server: %s:%s\" % (socket.gethostname(), port))\n server.listen(3)\n # Server loop.\n while True:\n print(\"\\x1b[1mWaiting for requests on port %d ... \\x1b[0m\" % port)\n (client, address) = server.accept()\n print(\"Incoming request at \" + time.ctime())\n message = client.recv(1 << 31)\n print(message)\n message = message.decode(\"ascii\")\n # Consider only HTTP GET requests.\n print(\"Handling request at \" + time.ctime())\n match = re.match(\"^GET /(.*) HTTP\", message)\n if not match:\n continue\n query = match.group(1)\n print(\"HTTP GET request received: \\\"%s\\\"\" % query)\n content = get_json(query)\n content_type = \"text/plain\"\n # Send result with proper HTTP headers.\n # print(\"Sending content: \" + content)\n result = (\"HTTP/1.1 200 OK\\r\\n\"\n \"Content-type: %s\\r\\n\"\n \"Access-Control-Allow-Origin: *\\n\"\n \"Content-length: %s\\r\\n\\r\\n%s\") % (content_type,\n len(content),\n content)\n client.send(result.encode())\n client.close()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: python3 server.py \")\n exit(1)\n port = int(sys.argv[1])\n read_trajectories()\n serve(port)\n","sub_path":"maps/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"347174164","text":"import usaepay\nfrom usaepay import run_call\n\n\ndef get(data={}):\n\t\"\"\"Calls /invoices/{invoice_key}/payments\n\tRetreive details of a invoice in database\n\n\tArgs:\n\t\tdata (dict) contents:\n\t\t\tinvoice_key (str) required\n\n\tReturns:\n\t\tDictionary InvoicePaymentList\n\t\"\"\"\n\tparams={}\n\tif 'limit' in data:\n\t\tparams['limit']=data['limit']\n\tif 'offset' in data:\n\t\tparams['offset']=data['offset']\n\n\tif not 'invoice_key' in data:\n\t\traise Exception('invoice_key required for invoices.payments.get()')\n\n\tpath='/invoices/' + data['invoice_key'] + '/payments'\n\treturn run_call('get',path,data,params)\n","sub_path":"invoices/payments/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"294795131","text":"import sys\nimport os\nimport urllib.request\nimport collections\nimport re\nfrom subprocess import Popen, PIPE\nimport gzip\n\n\nimport KeggNames\nkn = KeggNames.keggnames\n\n# Use my NCBI API key to set server/second server access <= 10\nos.environ[\"NCBI_API_KEY\"] = \"f4142f101db95406745385d940b13c37ab07\"\n\n# Make data struct to store ori info\nOri = collections.namedtuple('Ori', 
['seq','len','start','end'])\n\ndef get_kegg_organism_list():\n url = 'http://rest.kegg.jp/list/organism'\n data = urllib.request.urlopen(url).read().decode('utf-8').split('\\n')\n data.pop()\n return data\n\n\n\nclass Organism:\n \"\"\"Container class storing data scraped from KEGG, NCBI and TUBIC relating\n to a single organism present in KEGG organisms list.\"\"\"\n\n def __init__(self, kegg_genome_id, kegg_organism_id = None, strain_name = None, taxa_string = None):\n self.kegg_genome_id = kegg_genome_id # e.g T01001\n self.kegg_organism_id = kegg_organism_id # e.g hsa\n self.strain_name = strain_name # e.g Homo sapiens (human)\n self.taxa_string = taxa_string # e.g Eukaryotes;Animals;Vertebrates;Mammals\n self.annotated_gff = []\n self.refseq = ''\n self.genbank = ''\n self.get_metadata()\n self.doric = ''\n\n\n def in_taxonomic_group(self, group):\n \"\"\"Returns True of group in self.taxa_string, False otherwise\"\"\"\n return group in self.taxa_string\n\n def get_genome_entry_from_kegg(self):\n url = f'http://rest.kegg.jp/get/gn:{self.kegg_genome_id}'\n # Download webpage from kegg, read, and convert to regular str type\n return urllib.request.urlopen(url).read().decode('utf-8')\n\n def search_ncbi_for_refseq_id(self, gb_id):\n cmd = f\"esearch -db nuccore -query {gb_id} | elink -target nuccore -name nuccore_nuccore_gbrs | efetch -format docsum | xtract -pattern DocumentSummary -element AccessionVersion Slen Title | sed 's/,.*//' | sort -t $'\\t' -k 2,2nr\"\n result,err = Popen(cmd, stdout=PIPE,stderr=PIPE,shell=True).communicate()\n result = result.decode('utf-8').strip().split('\\n')\n if len(result) != 1:\n log = open(self.kegg_organism_id + '_multiple_refseq_ids.log','w')\n log.write('\\n'.join(result))\n log.close()\n return result[0].split('\\t')[0]\n return result[0].split('\\t')[0]\n\n\n def get_metadata(self):\n \"\"\"Makes request to rest.kegg.jp/get/kegg_genome_id to get various metadata:\n genome length; the sequence id of the genome used for kegg's annotations \n (either refseq or genbank, if genbank, ncbi is searched to get refseq_id\n if available); and ncbi taxonomy\"\"\"\n entry = self.get_genome_entry_from_kegg()\n # Make regex engine\n match = lambda a, b : re.findall(rf'{a}\\s+(\\S.*)\\n',b)\n # Resolve basic metadata\n self.genome_length = match(kn.LENGTH, entry)\n if len(self.genome_length) > 0:\n self.genome_length = int(self.genome_length[0])\n if self.kegg_organism_id == None:\n if len(self.organism_id) > 0:\n self.organism_id = self.organism_id[0]\n self.kegg_organism_id = match(kn.NAME, entry).split(',')[0]\n\n if self.strain_name == None:\n self.strain_name = match(kn.DEFINITION, entry)\n if len(self.strain_name) > 0:\n self.strain_name = self.strain_name[0]\n if self.taxa_string == None:\n self.taxa_string = match(kn.LINEAGE, entry)\n if len(self.taxa_string) > 0:\n self.taxa_string= self.taxa_string[0]\n\n self.assembly = match(kn.DATA_SOURCE, entry)\n if len(self.assembly) > 0:\n self.assembly = self.assembly[0]\n self.assembly = re.findall('Assembly:([\\.\\w]+)', self.assembly)[0]\n # Attempt to determine the Refseq sequence id used for this kegg organism\n seq = match(kn.SEQUENCE, entry)\n if len(seq) > 0:\n seq = seq[0]\n if seq[0:2] == 'RS':\n self.refseq, self.genbank = re.findall(r'RS:(\\S+)\\s+\\(GB:(\\S+)\\)',seq)[0]\n if seq[0:2] == 'GB':\n self.genbank = re.findall(r'GB:(\\S+)',seq)[0]\n self.refseq = self.search_ncbi_for_refseq_id(self.genbank)\n \n def get_kegg_gene_annotations(self, annotation):\n url = 
f'http://rest.kegg.jp/link/{annotation}/{self.kegg_genome_id}'\n data = urllib.request.urlopen(url).read().decode('utf-8').split('\\n')\n data = [tuple(e.split('\\t')) for e in data]\n data.pop()\n return data\n\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n\n\n# to do\n\n# def generate_index(self, gff, field):\n# match = lambda a,b : re.findall(rf'[^\\w]{a}=([\\w]+)',b)\n# d = dict()\n# for index, line in enumerate(gff):\n# tag = match(field, line)\n# if len(tag) == 1:\n# d[tag[0]] = index\n# return d\n#\n# def set_gff_filename(self, file_name):\n# self.gff_filename = file_name\n#\n# def kegg_annotate_gff(self):\n# \"\"\"Adds KEGG pathway, Ontology and other annotations to gff\"\"\"\n# if len(self.annotated_gff) == 0:\n# with gzip.open(self.gff_filename, 'rb') as f:\n# gff = f.read().decode('utf-8').split('\\n')\n# gff.pop()\n# gff = [g for g in gff if g[0] != '#']\n# gff = [g for g in gff if (g.split('\\t')[2] == 'gene' and g.split('\\t')[0] == 'NC_002663.1')]\n# self.annotated_gff = gff\n#\n# old_tag_idx = self.generate_index(gff, 'old_locus_tag')\n# tag_idx = self.generate_index(gff, 'locus_tag')\n# annotations = self.get_kegg_gene_annotations('pathway')\n# remove_header = lambda x : re.findall(r'.*:(.*)',x)[0]\n#\n# log = open(self.kegg_genome_id + '_failed_kegg_annotation.log','w')\n# for gene_id, kegg_anno in annotations:\n# gene_id = remove_header(gene_id)\n# idx = old_tag_idx.get(gene_id, -1) \n# if idx == -1:\n# idx = tag_idx.get(gene_id, -1)\n# if idx == -1:\n# log.write(gene_id + '\\n')\n# continue\n# self.annotated_gff[idx] += '\\t' + kegg_anno\n# log.close()\n#\n# def write_annotated_gff(self):\n# data = [(int(e.split('\\t')[3]),e) for e in self.annotated_gff]\n# data = sorted(data, key=lambda x: x[0])\n# self.annotated_gff = [e for _,e in data]\n# with gzip.open(self.gff_filename, 'wb') as f:\n# f.write('\\n'.join(self.annotated_gff))\n","sub_path":"enrichment_analysis/kegg_analysis/OriTerKeggScrape.py","file_name":"OriTerKeggScrape.py","file_ext":"py","file_size_in_byte":6044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"241945655","text":"import time\nimport euler\n\neuler.print_problem(19)\nstart = time.time()\n\n# ==================================================\nn_year = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] # Normal Year\nl_year = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] # Leap Year\nweek = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n\ndone = False\nyear = 1901\nday_count = 0\ncount = 0\n\nwhile not done:\n if year % 4 == 0:\n # Leap Year\n for month in l_year:\n for date in range(1, month + 1):\n if day_count == 6:\n day_count = 0\n else:\n day_count += 1\n day_name = week[day_count]\n if day_name == \"Sunday\" and date == 1:\n count += 1\n else:\n # Normal Year\n for month in n_year:\n for date in range(1, month + 1):\n if day_count == 6:\n day_count = 0\n else:\n day_count += 1\n day_name = week[day_count]\n if day_name == \"Sunday\" and date == 1:\n count += 1\n # Check Year\n if year < 2000:\n year += 1\n else:\n done = True\n\n\nprint(\"The Answer is: %i\" % count)\n# ==================================================\n\nend = time.time()\ntime = end - start\n\nprint(\"This took %s seconds\" % time)\n","sub_path":"python/000-050/euler019.py","file_name":"euler019.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
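A short usage sketch for the KEGG scraper above. KEGG's /list/organism endpoint returns tab-separated rows of (T number, organism code, name, lineage), which map directly onto Organism's constructor arguments; note that constructing an Organism performs live KEGG/NCBI requests through get_metadata(), so this is network-bound. It assumes the module above is in scope.

# Sketch only: driving get_kegg_organism_list() and Organism from above.
rows = get_kegg_organism_list()
tnum, code, name, lineage = rows[0].split('\t')

org = Organism(tnum,
               kegg_organism_id=code,
               strain_name=name,
               taxa_string=lineage)
print(org.kegg_genome_id, org.refseq, org.genome_length)
print(org.in_taxonomic_group('Bacteria'))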
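The manual calendar walk in euler019.py above can be cross-checked with the standard library: date.weekday() returns 6 for Sunday, so counting first-of-month Sundays over 1901-2000 should reproduce the same total.

# Cross-check for euler019.py above using datetime instead of a manual
# calendar walk. The range matches the problem's
# 1 Jan 1901 - 31 Dec 2000 window.
import datetime

count = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if datetime.date(year, month, 1).weekday() == 6
)
print(count)  # should agree with the loop above (171)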
+{"seq_id":"501981005","text":"import matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport numpy as np\r\nfrom scipy.integrate import odeint, ode, quad # ignore this?\r\nimport scipy.signal\r\nimport matplotlib.cm as cm\r\nimport matplotlib as mpl\r\nimport pandas as pd\r\nimport astropy.units as u\r\n#import seaborn as sns\r\n\r\n# physical constants\r\nme=9.109*10**-31 # mass of electron\r\nk=9*10**9 # constant\r\ne=1.602*10**-19 # charge of proton/electron\r\nZ=3 # ion charge\r\na0=0.529*10**-10 # bohr radius\r\nc=3.0*10**8 # speed of light\r\n\r\n# returns Coulomb force at a distance r = sqrt(x^2+y^2) from ion of charge Ze\r\ndef getForce(x, y):\r\n return -1*(k*Z*e**2)/(x**2+y**2)\r\n\r\n# may not need this section, maybe for Fourier transform?\r\n# axes constants\r\n#x_min = -1000 # x min\r\n#x_max = 1000 # x max\r\n#y_min = -1000 # y min\r\n#y_max = 1000 # y max\r\n\r\n#x = np.linspace(x_min,x_max,N)\r\n#y = np.linspace(y_min,y_max,N)\r\n\r\n# initial conditions\r\nx0 = -100*a0 # initial x position\r\ny0 = 100*a0 # initial y position\r\nv0x = 10**5 # initial x velocity\r\nv0y = 0 # initial y velocity\r\nN = 5000 # number of iterations\r\n\r\n# set up electron x,y positions\r\nrx = np.zeros(N) # electron x position\r\nry = np.zeros(N) # electron y position\r\nrx[0] = x0 # initial x position of e\r\nry[0] = y0 # initial y position of e\r\n\r\n#set up electron x,y velocities\r\nvx = np.zeros(N) # velocity in x of electron\r\nvy = np.zeros(N) # velocity in y of electron\r\nvx[0] = v0x # initial x velocity of e\r\nvy[0] = v0y # initial y velocity of e\r\n\r\n#set up electron x,y accelerations and angle between them\r\nax = np.zeros(N) # acceleration in x\r\nay = np.zeros(N) # acceleration in y\r\ntheta = np.arctan2(y0,x0) # angle between x and y components\r\nax[0] = np.cos(theta)*getForce(x0,y0)/me # initial acceleration in x\r\nay[0] = np.sin(theta)*getForce(x0,y0)/me # initial acceleration in y\r\n\r\n# time step\r\ndt = 1.6*10**-17\r\n# time array\r\ntime = np.zeros(N)\r\n\r\n\r\n# 2: setting up for loop using basic equations of motion\r\nfor i in range(1, N):\r\n time[i] = dt*i\r\n vx[i] = vx[i-1]+ax[i-1]*dt\r\n vy[i] = vy[i-1]+ay[i-1]*dt\r\n rx[i] = rx[i-1]+vx[i-1]*dt\r\n ry[i] = ry[i-1]+vy[i-1]*dt\r\n accel = getForce(rx[i-1],ry[i-1])/me\r\n theta = np.arctan2(ry[i-1],rx[i-1])\r\n ax[i] = accel*np.cos(theta)\r\n ay[i] = accel*np.sin(theta)\r\n\r\n# Setting up three plots for #3\r\nfig, (ax1, ax2, ax3) = plt.subplots(3, 1,figsize =[9,9])\r\nax1.plot(rx/a0, ry/a0)\r\nax1.set_xlabel('X Position ($a_{o}$)')\r\nax1.set_ylabel('Y Position ($a_{o}$)')\r\nax2.plot(time, vx, label='$v_{x}$')\r\nax2.plot(time, vy, label='$v_{y}$')\r\nax2.legend()\r\nax2.set_xlabel('Time (s)')\r\nax2.set_ylabel('Velocity ($m/s$)')\r\nax3.plot(time, ax, label='$a_{x}$')\r\nax3.plot(time, ay, label='$a_{y}$')\r\nax3.legend()\r\nax3.set_xlabel('Time (s)')\r\nax3.set_ylabel('Acceleration ($m/s^{2}$)')\r\n\r\nplt.show()\r\n\r\n\r\n# Now for part 4:\r\n\r\n# Define T for power Spectrum\r\nT = np.abs(2*x0/v0x)\r\n# Define Power spectrum from acceleration of electron\r\ndef PowerSpectrum(ax, ay, N, time):\r\n freq, spec = scipy.signal.periodogram(ax, N/time)\r\n return spec, freq\r\n\r\n# Calculates power spectrum\r\npowerspec, freq = PowerSpectrum(ax,ay,N,T) # gets spectrum and frequency\r\npowermax = np.max(powerspec) # calculates max of the spectrum\r\npowerspec = powerspec / powermax # scales spectrum to max of spectrum (dimensionless)\r\ncap = powerspec > 0.01 # caps 
spectrum\r\npowerspec = powerspec[cap] # redefines powerspec in capped area\r\nfreq = freq[cap] # redefines freq in capped area\r\n\r\n# Set up plot for power spectrum\r\nplt.plot(freq,powerspec)\r\nplt.title('Power Spectrum of One Electron')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Power Spectrum (Amplitude)')\r\n\r\nplt.show()\r\n# 5:\r\n# Run a series of simulations with different combinations of initial y positions\r\n# i.e. different “impact parameters” b and different initial velocities v.\r\n# Plot how the frequency of the peak of the power spectrum varies with b and v.\r\n\r\n# how many electrons we're going to plot\r\nM = 20\r\n\r\n# declare empty arrays here\r\nrxArray = []\r\nryArray = []\r\nvxArray = []\r\nvyArray = []\r\naxArray = []\r\nayArray = []\r\n\r\n# set range of random values for initial position here\r\nRANGE_MIN = -100\r\nRANGE_MAX = 100\r\n\r\n# set range of random values for initial y velocity\r\nY_RANGE_MIN = -10**5\r\nY_RANGE_MAX = 10**5\r\n\r\n# loop through M times to get values for each electron\r\nfor j in range(0, M):\r\n\t# get random value for initial x and y position\r\n\t# 0 value for initial position causes divide by 0 error so check\r\n\t# for that and try again if its 0, otherwise break out of the loop\r\n\twhile True:\r\n\t\tyvel = np.random.randint(Y_RANGE_MIN, Y_RANGE_MAX)\r\n\t\typos = np.random.randint(RANGE_MIN, RANGE_MAX)\r\n\t\tif yvel != 0 and ypos !=0:\r\n\t\t\tbreak\r\n\tprint(\"{}: yvel {} ypos {}\".format(j, yvel, ypos))\r\n\r\n\t# initial conditions with varying y position aka b and varying yvelocity\r\n\tx0 = -100*a0 # initial x position\r\n\ty0 = ypos*a0 # initial y position\r\n\tv0x = 10**5 # initial x velocity\r\n\tv0y = yvel # initial y velocity\r\n\tN = 5000 # number of iterations\r\n\r\n\t# set up electron x,y positions\r\n\trx = np.zeros(N) # electron x position\r\n\try = np.zeros(N) # electron y position\r\n\trx[0] = x0 # initial x position of e\r\n\try[0] = y0 # initial y position of e\r\n\r\n\t#set up electron x,y velocities\r\n\tvx = np.zeros(N) # velocity in x of electron\r\n\tvy = np.zeros(N) # velocity in y of electron\r\n\tvx[0] = v0x # initial x velocity of e\r\n\tvy[0] = v0y # initial y velocity of e\r\n\r\n\t#set up electron x,y accelerations and angle between them\r\n\tax = np.zeros(N) # acceleration in x\r\n\tay = np.zeros(N) # acceleration in y\r\n\ttheta = np.arctan2(y0,x0) # angle between x and y components\r\n\tax[0] = np.cos(theta)*getForce(x0,y0)/me # initial acceleration in x\r\n\tay[0] = np.sin(theta)*getForce(x0,y0)/me # initial acceleration in y\r\n\r\n\t# time step\r\n\tdt = 1.6*10**-17\r\n\t# time array\r\n\ttime = np.zeros(N)\r\n# 2: setting up for loop using basic equations of motion\r\n\tfor i in range(1, N):\r\n\t\ttime[i] = dt*i\r\n\t\tvx[i] = vx[i-1]+ax[i-1]*dt\r\n\t\tvy[i] = vy[i-1]+ay[i-1]*dt\r\n\t\trx[i] = rx[i-1]+vx[i-1]*dt\r\n\t\try[i] = ry[i-1]+vy[i-1]*dt\r\n\t\taccel = getForce(rx[i-1],ry[i-1])/me\r\n\t\ttheta = np.arctan2(ry[i-1],rx[i-1])\r\n\t\tax[i] = accel*np.cos(theta)\r\n\t\tay[i] = accel*np.sin(theta)\r\n\r\n\t# append this loop's data to the arrays\r\n\trxArray.append([])\r\n\trxArray.insert(j, rx)\r\n\r\n\tryArray.append([])\r\n\tryArray.insert(j, ry)\r\n\r\n\tvxArray.append([])\r\n\tvxArray.insert(j, vx)\r\n\r\n\tvyArray.append([])\r\n\tvyArray.insert(j, vy)\r\n\r\n\taxArray.append([])\r\n\taxArray.insert(j, ax)\r\n\r\n\tayArray.append([])\r\n\tayArray.insert(j, ay)\r\n\r\n# Setting up three plots for #3\r\nfig, (ax1, ax2, ax3) = plt.subplots(3, 1,figsize =[9,9])\r\n# plot 1: 
rx, ry\r\nfor i in range(0, M):\r\n\tax1.plot(rxArray[i]/a0, ryArray[i]/a0)\r\nax1.set_xlabel('X Position ($a_{o}$)')\r\nax1.set_ylabel('Y Position ($a_{o}$)')\r\n# plot 2: vx, vy\r\nfor i in range(0, M):\r\n\tax2.plot(time, vxArray[i], label='$v_{x}$')\r\n\tax2.plot(time, vyArray[i], label='$v_{y}$')\r\nax2.legend()\r\nax2.set_xlabel('Time (s)')\r\nax2.set_ylabel('Velocity ($m/s$)')\r\n# plot 3: ax, ay\r\nfor i in range(0, M):\r\n\tax3.plot(time, axArray[i], label='$a_{x}$')\r\n\tax3.plot(time, ayArray[i], label='$a_{y}$')\r\nax3.legend()\r\nax3.set_xlabel('Time (s)')\r\nax3.set_ylabel('Acceleration ($m/s^{2}$)')\r\n\r\nplt.show()\r\n\r\n\r\n# Now for part 4:\r\n\r\n# Define T for power Spectrum\r\nT = np.abs(2*x0/v0x)\r\n# Define Power spectrum from acceleration of electron\r\ndef PowerSpectrum(ax, ay, N, time):\r\n freq, spec = scipy.signal.periodogram(np.sqrt(ax**2+ay**2), N/time)\r\n return spec, freq\r\n\r\nfreqmaxs = []\r\n# Calculates power spectrum for 20 electrons\r\nfor i in range(0, M):\r\n powerspec, freq = PowerSpectrum(axArray[i],ayArray[i],N,T) # gets spectrum and frequency\r\n powermax_id = np.argmax(powerspec) # calculates max of the spectrum\r\n fmax = freq[powermax_id] #frequency of highest power\r\n freqmaxs.append(fmax)\r\n powerspec = powerspec / powermax_id\r\n cap = powerspec > 0.01 # caps spectrum\r\n powerspec = powerspec[cap] # redefines powerspec in capped area\r\n freq = freq[cap]\r\n plt.plot(freq,powerspec)\r\n #plt.xlim([-1.0, 2.5])\r\n plt.title('Power Spectrum of 20 Random Electrons')\r\n plt.xlabel('Frequency (Hz)')\r\n plt.ylabel('Power Spectrum (Amplitude)')\r\n #plt.yscale('log')\r\n\r\n\r\nplt.show()\r\n\r\n# Set up plot for max power frequency versus impact parameter b\r\n#something is off for this not sure what it is\r\n\r\n# Calculate impact parameter\r\nimpact_param = np.zeros(M)\r\n\r\nfor i in range(0, M):\r\n impact_param[i] = np.abs(ypos)\r\n plt.plot(impact_param,freqmaxs, '.')\r\n #plt.ylim()\r\n #plt.xlim()\r\n plt.title('Peak Power Frequency vs. Impact Parameter')\r\n plt.xlabel('Impact Parameter (m)')\r\n plt.ylabel('Peak Power Frequency (Hz)')\r\n\r\n\r\nplt.show()\r\n\r\nyvelocity = np.zeros(M)\r\n\r\nfor i in range(0, M):\r\n yvelocity[i] = np.abs(yvel)\r\n plt.plot(yvelocity,freqmaxs, '.')\r\n plt.title('Peak Power Frequency vs. 
Velocity')\r\n plt.xlabel('Velocity (m/s)')\r\n plt.ylabel('Peak Power Frequency (Hz)')\r\n\r\nplt.show()\r\n","sub_path":"HW3/hw3_try5.py","file_name":"hw3_try5.py","file_ext":"py","file_size_in_byte":9316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"642049449","text":"\nfrom urllib import request\nimport re\n\nclass Spider(object):\n url = \"https://www.huya.com/g/2336\"\n rootpatten = '([\\s\\S]*?)'\n prenamepatten = ''\n namepatten = '\"([\\s\\S]*?)\"'\n numberpatten = '([\\s\\S]*?)'\n\n def __fetch_content(self):\n r = request.urlopen(self.url)\n htmls = r.read()\n htmls = str(htmls,encoding='utf-8')\n #print(htmls)\n return htmls\n\n def __analysis(self,htmls):\n roothtml = re.findall(self.rootpatten,htmls) \n anchors = []\n for html in roothtml:\n prename = re.findall(self.prenamepatten,html)\n name = re.findall(self.namepatten,str(prename))\n number = re.findall(self.numberpatten,html)\n anchor = {'name':name,'number':number}\n anchors.append(anchor)\n return anchors\n \n def __sortseed(self,anchor):\n r = re.findall(\"\\d\",str(anchor['number']))\n number = 0\n if '万' in str(anchor['number']): \n for rank in range(0,len(r)): \n number = number*10+float(r[rank])\n number = number*10000\n return number\n\n def __sort(self,anchors):\n anchors = sorted(anchors,key=self.__sortseed,reverse=True)\n return anchors\n\n def __show(self,anchors):\n for rank in range(0,len(anchors)):\n print('rank' + ':' + str(rank+1) + str(anchors[rank]['name']) + '------' + str(anchors[rank]['number']))\n\n def go(self):\n htmls = self.__fetch_content()\n anchors = self.__analysis(htmls)\n anchors = self.__sort(anchors)\n self.__show(anchors)\n\nspider = Spider()\nspider.go()\n\n \n\n\n\n\n","sub_path":"python/spider/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"649297016","text":"import numpy as np\r\nfrom collections import Counter\r\nimport pandas as pd\r\nimport pickle\r\nimport re\r\nfrom numpy import pi\r\nfrom itertools import chain\r\nfrom bokeh.plotting import figure, show\r\nfrom bokeh.models import BasicTickFormatter, HoverTool, BoxSelectTool,BoxZoomTool, ResetTool, Span, OpenURL\r\nfrom bokeh.models import NumeralTickFormatter, WheelZoomTool, PanTool, SaveTool, ColumnDataSource,LinearAxis, Range1d\r\nfrom bokeh.models.widgets import Select, RadioGroup,DataTable, StringFormatter, TableColumn, NumberFormatter, Button\r\nfrom bokeh.layouts import widgetbox, row, column\r\nfrom bokeh.io import curdoc\r\nimport webbrowser\r\n\r\ncurdoc().clear()\r\ncurdoc().title = 'OPCOSTS'\r\ndef find_totals(data):\r\n totals = {}\r\n for a in data['Sites'].unique():\r\n d = []\r\n for b in data.index:\r\n if data['Site'][b]==a:\r\n d.append(data['YTD Actual'][b])\r\n totals[a]=sum(d)\r\n return totals\r\ndef totaldict(data):\r\n outer = {}\r\n\r\n for x in data['Site'].unique():\r\n inner = {}\r\n num = 0\r\n for y in data['Main Description'].unique():\r\n\r\n list1 = []\r\n for a in data.index:\r\n if data['Site'][a]==x and data['Main Description'][a]==y:\r\n list1.append(data['YTD Actual'][a])\r\n inner[y]=sum(list1)\r\n num+=sum(list1)\r\n inner['Total']=num\r\n outer[x]= inner\r\n return outer\r\ndef get_source(start, end, site, desc, subdesc='Total'):\r\n x = [a for a in range(start,end+1)]\r\n y = func1(start, end, site, desc, subdesc)\r\n source = ColumnDataSource(data=dict(x=x, y=y))\r\n return source\r\ndef 
get_sourcepsqft(start, end, site, desc, subdesc = 'Total'):\r\n x = [a for a in range(start,end+1)]\r\n y = func1psqft(start, end, site, desc, subdesc)\r\n source = ColumnDataSource(data=dict(x=x, y=y))\r\n return source\r\ndef func1(start, end, site, desc, subdesc='Total'):\r\n list1=[]\r\n for x in range(start,end+1):\r\n try:\r\n # list1.append(data[str(x)][str(site)][str(desc)])\r\n if desc == 'Total':\r\n list1.append(xyz[str(x)][str(site)][str(desc)])\r\n else:\r\n list1.append(xyz[str(x)][str(site)][str(desc)][str(subdesc)])\r\n except Exception as e:\r\n list1.append(int(0))\r\n return list1\r\ndef func1psqft(start, end, site, desc, subdesc='Total'):\r\n list1=[]\r\n for x in range(start, end+1):\r\n try:\r\n # list1.append(data[str(x)][str(site)][str(desc)]/AREA[str(x)][str(site)])\r\n if desc == 'Total':\r\n list1.append(xyz[str(x)][str(site)][str(desc)]/AREA[str(x)][str(site)])\r\n else:\r\n list1.append(xyz[str(x)][str(site)][str(desc)][str(subdesc)]/AREA[str(x)][str(site)])\r\n except Exception as e:\r\n list1.append(int(0))\r\n return list1\r\ndef make_stuff2(a,x) :\r\n ww = {}\r\n for each in x:\r\n if data['2017'][each][str(a)]/AREA['2017'][each] > 0:\r\n ww[each] = data['2017'][each][str(a)]/AREA['2017'][each]\r\n else:\r\n ww[each] = 0\r\n return ww\r\ndef make_stuff3(a,x,year, subdesc = 'Total') :\r\n ww = {}\r\n for each in x:\r\n if a != 'Total':\r\n # print(each, year, a,data[str(year)][each][str(a)],AREA[str(year)][each])\r\n # if data[str(year)][each][str(a)]/AREA[str(year)][each] > 0:\r\n if xyz[str(year)][each][str(a)][subdesc] / AREA[str(year)][each] > 0:\r\n try:\r\n ww[each] = xyz[str(year)][each][str(a)][subdesc]/AREA[str(year)][each]\r\n except Exception as e:\r\n print(each, 'NONONONONONONONONONO')\r\n ww[each] = 0\r\n else:\r\n print('this fails')\r\n else:\r\n if xyz[str(year)][each][str(a)] / AREA[str(year)][each] > 0:\r\n try:\r\n ww[each] = xyz[str(year)][each][str(a)]/AREA[str(year)][each]\r\n except Exception as e:\r\n print(each, 'NONONONONONONONONONO')\r\n ww[each] = 0\r\n\r\n return ww\r\ndef sorting(dict1, tick=1):\r\n xx=[]\r\n yy=[]\r\n aq = sorted(dict1, key = dict1.get, reverse=True)\r\n for prop in aq:\r\n xx.append(prop)\r\n yy.append(dict1[prop])\r\n if tick == 1:\r\n yy = [y for y in yy if y > 0]\r\n xx = xx[:len(yy)]\r\n return xx, yy\r\ndef makepie(data):\r\n q = []\r\n r = []\r\n cats, numbs = sorting(data)\r\n cats = cats[::-1]\r\n numbs = numbs[::-1]\r\n numbs1 = [x / numbs[-1] for x in numbs]\r\n numbs1 = [0] + numbs1\r\n numbs2 = [y * 100 for y in numbs1]\r\n for x in numbs1:\r\n q.append(x)\r\n r.append(sum(q))\r\n starts = [p * 2 * pi for p in r[:-1]]\r\n ends = [p * 2 * pi for p in r[1:]]\r\n starts = starts[:-1]\r\n ends = ends[:-1]\r\n perc = numbs2[1:]\r\n\r\n colors = ['#238b45', '#41ab5d', '#74c476', '#a1d99b', '#00441b', '#006d2c', '#c7e9c0', '#e5f5e0', '#f7fcf5',\r\n '#fff5eb', '#d9f0a3', '#f7fcb9', '#ffffe5']\r\n colors.reverse()\r\n\r\n sourcepie = ColumnDataSource(\r\n data=dict(\r\n x=[0 for x in numbs1],\r\n y=[0 for x in numbs1],\r\n ymin=[0.35 for x in numbs1],\r\n ymax=[0.9 for x in numbs1],\r\n percents=numbs2[::-1][1:],\r\n category=cats[::-1][1:],\r\n starts=starts[::-1],\r\n colors=colors[::-1],\r\n ends=ends[::-1],\r\n )\r\n )\r\n\r\n return sourcepie\r\ndef updateproperty(attr, old, new):\r\n global xxx\r\n select3.value = 'Total'\r\n source.data = get_source(start, end, str(new), str(select2.value), str(select3.value)).data\r\n sourceLine2.data = get_sourcepsqft(start, end, str(new), str(select2.value), 
str(select3.value)).data\r\n source2018.data = get_source(end, end2, str(new), str(select2.value), str(select3.value)).data\r\n sourceLine22018.data = get_sourcepsqft(end, end2, str(new), str(select2.value), str(select3.value)).data\r\n p.extra_y_ranges['foo'].end=max(sourceLine2.data['y']+ sourceLine22018.data['y']) * 1.2\r\n p.extra_y_ranges['foo'].start = min(sourceLine2.data['y']+ sourceLine22018.data['y']) * 0.8\r\n p.title.text = new + \" Operational Costs\"\r\n w.title.text = str(selectyear.value)+ ' ' +str(select2.value) +\" Breakdown\"\r\n\r\n if max(sourceLine2.data['y']) < 3:\r\n p.yaxis[1].formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n p.yaxis[1].formatter = NumeralTickFormatter(format='$0,0')\r\n if str(selectprovince.value) != 'All':\r\n z.title.text = str(selectyear.value) + ' ' + str(selectprovince.value) + ' Portfolio ' + str(\r\n select2.value) + ' Per Total Leasable Area'\r\n xxr, yyr = sorting(make_stuff2(str(select2.value), [x for x in provs if provs[x] == str(selectprovince.value)]))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value), [x for x in provs if provs[x] == str(selectprovince.value)], selectyear.value, select3.value))\r\n xxr2, yyr2 = sorting(\r\n make_stuff2(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])]))\r\n xxr2, yyr2 = sorting(\r\n make_stuff3(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])],\r\n selectyear.value, select3.value))\r\n else:\r\n z.title.text = str(selectyear.value) + ' Portfolio ' + str(select2.value) + ' Per Total Leasable Area'\r\n xxr, yyr = sorting(make_stuff2(str(select2.value),newoff))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value),newoff, selectyear.value, select3.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])], selectyear.value, select3.value))\r\n for att in range(len(xxr)):\r\n if xxr[att] == new:\r\n ddr = att + 0.5\r\n span.location = ddr\r\n sourcepsqft2.data = ColumnDataSource(data=dict(x=list(xxr2), y=list(yyr2), desc=list(xxr), max=list(yyr))).data\r\n sourcepsqft.data = ColumnDataSource(data=dict(x=list(xxr), y=list(yyr), desc=list(xxr), max=list(yyr))).data\r\n yyy = ['${:.2f}'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]), '${:.2f}'.format(sum(list(yyr))/len(list(yyr))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]/(sum(list(yyr))/len(list(yyr)))),'${:.2f}'.format(sum(list(yyr2))/len(list(yyr2))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]/(sum(list(yyr2))/len(list(yyr2)))),\r\n '${:.2f}'.format(np.std(list(yyr))), '${:.2f}'.format(np.percentile(list(yyr), 25)),\r\n '{:.2f}%'.format(((source.data['y'][-1]/(source.data['y'][-2]+1))-1)*100)]\r\n xxx = ['{}'.format(select.value), 'National Average', 'Compared to National Average',\r\n '{} Average'.format(provs[select.value]), 'Compared To Provincial Average', 'Variance', 'Top 25%',\r\n '2016 vs 2017']\r\n if max(yyr) < 3:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0')\r\n smalltable.data = ColumnDataSource(data=dict(x=xxx, y=yyy)).data\r\n if select2.value != 'Total':\r\n sourcepie.data = 
makepie(xyz[str(selectyear.value)][str(new)][str(select2.value)]).data\r\n ss, tt = sorting(xyz[str(selectyear.value)][str(new)][str(select2.value)])\r\n else:\r\n sourcepie.data = makepie(data[str(selectyear.value)][str(new)]).data\r\n ss, tt = sorting(data[str(selectyear.value)][str(select.value)])\r\n ww = [x / max(tt) for x in tt]\r\n zz = [x / AREA[selectyear.value][select.value] for x in tt]\r\n bigtable.data = ColumnDataSource(data=dict(x=ss, y=tt, w=ww, z=zz)).data\r\ndef updateopcost(attr, old, new):\r\n global smalltable\r\n global xxx\r\n select3.value =desc\r\n source.data = get_source(start, end, str(select.value), str(new)).data\r\n sourceLine2.data = get_sourcepsqft(start, end, str(select.value), str(new)).data\r\n source2018.data = get_source(end, end2, str(select.value), str(new)).data\r\n sourceLine22018.data = get_sourcepsqft(end, end2, str(select.value), str(new)).data\r\n p.extra_y_ranges['foo'].end = max(sourceLine2.data['y'] + sourceLine22018.data['y']) * 1.2\r\n p.extra_y_ranges['foo'].start = min(sourceLine2.data['y']+ sourceLine22018.data['y']) * 0.8\r\n if max(sourceLine2.data['y']) < 3:\r\n p.yaxis[1].formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n p.yaxis[1].formatter = NumeralTickFormatter(format='$0,0')\r\n w.title.text = str(selectyear.value) + ' ' + str(new) + \" Breakdown\"\r\n z.title.text = str(selectyear.value) + ' ' + str(selectprovince.value) + ' Portfolio ' + str(new) + ' Per Total Leasable Area'\r\n if select2.value != 'Total':\r\n sourcepie.data = makepie(xyz[str(selectyear.value)][str(select.value)][str(new)]).data\r\n ss, tt = sorting(xyz[str(selectyear.value)][str(select.value)][str(new)])\r\n else:\r\n sourcepie.data = makepie(data[str(selectyear.value)][str(select.value)]).data\r\n ss, tt = sorting(data[str(selectyear.value)][str(select.value)])\r\n ww = [x / max(tt) for x in tt]\r\n zz = [x / AREA[selectyear.value][select.value] for x in tt]\r\n bigtable.data = ColumnDataSource(data=dict(x=ss, y=tt, w=ww, z=zz)).data\r\n if selectprovince.value != 'All':\r\n xxr, yyr = sorting(make_stuff2(str(new),[x for x in provs if provs[x] == str(selectprovince.value)]))\r\n xxr, yyr = sorting(make_stuff3(str(new), [x for x in provs if provs[x] == str(selectprovince.value)], selectyear.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(new), [x[0] for x in provs.items() if x[1] == str(provs[select.value])]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(new), [x[0] for x in provs.items() if x[1] == str(provs[select.value])], selectyear.value))\r\n else:\r\n xxr, yyr = sorting(make_stuff2(str(new),newoff))\r\n xxr, yyr = sorting(make_stuff3(str(new), newoff, selectyear.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(new), [x[0] for x in provs.items() if x[1] == str(provs[select.value])]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(new), [x[0] for x in provs.items() if x[1] == str(provs[select.value])], selectyear.value))\r\n z.x_range.factors = []\r\n z.x_range.factors = list(xxr)\r\n if max(yyr) < 3:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0')\r\n sourcepsqft2.data = ColumnDataSource(data=dict(x=list(xxr2), y=list(yyr2), desc=list(xxr), max=list(yyr))).data\r\n sourcepsqft.data = ColumnDataSource(data=dict(x=list(xxr), y=list(yyr), desc=list(xxr), max=list(yyr))).data\r\n yyy = ['${:.2f}'.format(make_stuff3(str(select2.value), newoff, selectyear.value)[select.value]), '${:.2f}'.format(sum(list(yyr))/len(list(yyr))),\r\n 
'{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value)[select.value]/(sum(list(yyr))/len(list(yyr)))),'${:.2f}'.format(sum(list(yyr2))/len(list(yyr2))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value)[select.value]/(sum(list(yyr2))/len(list(yyr2)))),\r\n '${:.2f}'.format(np.std(list(yyr))), '${:.2f}'.format(np.percentile(list(yyr), 25)),\r\n '{:.2f}%'.format(((source.data['y'][-1]/(source.data['y'][-2]+1))-1)*100)]\r\n xxx = ['{}'.format(select.value), 'National Average', 'Compared to National Average',\r\n '{} Average'.format(provs[select.value]), 'Compared To Provincial Average', 'Variance', 'Top 25%',\r\n '2016 vs 2017']\r\n smalltable.data = ColumnDataSource(data=dict(x=xxx, y=yyy)).data\r\n for att in range(len(xxr)):\r\n if xxr[att] == select.value:\r\n dde = att + 0.5\r\n span.location = dde\r\n for x in sublist:\r\n if str(new) == x:\r\n select3.options = sublist[x] + ['Total']\r\n elif str(new) == 'Total':\r\n select3.options = ['Total']\r\ndef updatesubopcost(attr, old, new):\r\n if str(new) == 'Total':\r\n if select2.value == 'Total':\r\n pass\r\n source.data = get_source(start, end, str(select.value), str(select2.value), str(new)).data\r\n sourceLine2.data = get_sourcepsqft(start, end, str(select.value), str(select2.value),str(new)).data\r\n source2018.data = get_source( end, end2,str(select.value), str(select2.value), str(new)).data\r\n sourceLine22018.data = get_sourcepsqft(end,end2, str(select.value), str(select2.value),str(new)).data\r\n p.extra_y_ranges['foo'].end = max(sourceLine2.data['y']+sourceLine22018.data['y']) * 1.2\r\n p.extra_y_ranges['foo'].start = min(sourceLine2.data['y']+sourceLine22018.data['y']) * 0.8\r\n if max(sourceLine2.data['y']) < 3:\r\n p.yaxis[1].formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n p.yaxis[1].formatter = NumeralTickFormatter(format='$0,0')\r\n if selectprovince.value != 'All':\r\n xxr, yyr = sorting(make_stuff2(str(select2.value),[x for x in provs if provs[x] == str(selectprovince.value)]))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value), [x for x in provs if provs[x] == str(selectprovince.value)], selectyear.value, select3.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])], selectyear.value, select3.value))\r\n else:\r\n xxr, yyr = sorting(make_stuff2(str(select2.value), newoff))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value), newoff,\r\n selectyear.value, select3.value))\r\n xxr2, yyr2 = sorting(\r\n make_stuff2(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])]))\r\n xxr2, yyr2 = sorting(\r\n make_stuff3(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])],\r\n selectyear.value, select3.value))\r\n\r\n z.x_range.factors = []\r\n z.x_range.factors = list(xxr)\r\n sourcepsqft2.data = ColumnDataSource(data=dict(x=list(xxr2), y=list(yyr2), desc=list(xxr), max=list(yyr))).data\r\n sourcepsqft.data = ColumnDataSource(data=dict(x=list(xxr), y=list(yyr), desc=list(xxr), max=list(yyr))).data\r\n yyy = ['${:.2f}'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]), '${:.2f}'.format(sum(list(yyr))/len(list(yyr))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value, 
select3.value)[select.value]/(sum(list(yyr))/len(list(yyr)))),'${:.2f}'.format(sum(list(yyr2))/len(list(yyr2))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]/(sum(list(yyr2))/len(list(yyr2)))),\r\n '${:.2f}'.format(np.std(list(yyr))), '${:.2f}'.format(np.percentile(list(yyr), 25)),\r\n '{:.2f}%'.format(((source.data['y'][-1]/(source.data['y'][-2]+1))-1)*100)]\r\n xxx = ['{}'.format(select.value), 'National Average', 'Compared to National Average',\r\n '{} Average'.format(provs[select.value]), 'Compared To Provincial Average', 'Variance', 'Top 25%',\r\n '2016 vs 2017']\r\n smalltable.data = ColumnDataSource(data=dict(x=xxx, y=yyy)).data\r\n for att in range(len(xxr)):\r\n if xxr[att] == select.value:\r\n dde = att + 0.5\r\n span.location = dde\r\n if max(yyr) < 3:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0')\r\ndef updateprovince(attr, old, new):\r\n asdf = select.value\r\n if str(new) == 'All':\r\n select.options=sorted(list(newoff))\r\n if asdf in select.options:\r\n select.value = asdf\r\n else:\r\n select.value = select.options[0]\r\n source.data = get_source(start, end, str(select.value), str(select2.value), str(select3.value)).data\r\n sourceLine2.data = get_sourcepsqft(start, end, str(select.value), str(select2.value), str(select3.value)).data\r\n source2018.data = get_source(end, end2, str(select.value), str(select2.value), str(select3.value)).data\r\n sourceLine22018.data = get_sourcepsqft(end, end2, str(select.value), str(select2.value),str(select3.value)).data\r\n p.extra_y_ranges['foo'].end = max(sourceLine2.data['y'] + sourceLine22018.data['y']) * 1.2\r\n p.extra_y_ranges['foo'].start = min(sourceLine2.data['y'] + sourceLine22018.data['y']) * 0.8\r\n p.title.text = str(select.value) + \" Operational Costs\"\r\n w.title.text = str(selectyear.value) + ' ' + str(select2.value) + \" Breakdown\"\r\n if max(sourceLine2.data['y']) < 3:\r\n p.yaxis[1].formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n p.yaxis[1].formatter = NumeralTickFormatter(format='$0,0')\r\n xxr, yyr = sorting(make_stuff2(str(select2.value), newoff))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value), newoff, str(selectyear.value), select3.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(select2.value), [x for x in newoff if x in [x[0] for x in provs.items() if\r\n x[1] == str(\r\n provs[select.value])]]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(select2.value), [x for x in newoff if x in [x[0] for x in provs.items() if\r\n x[1] == str(\r\n provs[select.value])]],\r\n str(selectyear.value), select3.value))\r\n else:\r\n z.title.text = str(selectyear.value)+ ' ' + str(new) + ' Portfolio ' + str(select2.value) + ' Per Total Leasable Area'\r\n select.options = sorted([x for x in provs if provs[x] == str(new)])\r\n if asdf in select.options:\r\n select.value = asdf\r\n else:\r\n select.value = select.options[0]\r\n source.data = get_source(start, end, str(select.value), str(select2.value), str(select3.value)).data\r\n sourceLine2.data = get_sourcepsqft(start, end, str(select.value), str(select2.value), str(select3.value)).data\r\n source2018.data = get_source(end, end2, str(select.value), str(select2.value), str(select3.value)).data\r\n sourceLine22018.data = get_sourcepsqft(end, end2, str(select.value), str(select2.value), str(select3.value)).data\r\n p.extra_y_ranges['foo'].end=max(sourceLine2.data['y']+ sourceLine22018.data['y']) * 1.2\r\n 
p.extra_y_ranges['foo'].start = min(sourceLine2.data['y']+ sourceLine22018.data['y']) * 0.8\r\n p.title.text = new + \" Operational Costs\"\r\n w.title.text = str(selectyear.value)+ ' ' +str(select2.value) +\" Breakdown\"\r\n if max(sourceLine2.data['y']) < 3:\r\n p.yaxis[1].formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n p.yaxis[1].formatter = NumeralTickFormatter(format='$0,0')\r\n\r\n xxr, yyr = sorting(make_stuff2(str(select2.value), [x for x in provs if provs[x] == str(new)]))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value), [x for x in provs if provs[x] == str(new)], str(selectyear.value), select3.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(select2.value), [x for x in newoff if x in [x[0] for x in provs.items() if x[1] == str(provs[select.value])]]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(select2.value), [x for x in newoff if x in [x[0] for x in provs.items() if x[1] == str(provs[select.value])]],str(selectyear.value), select3.value))\r\n z.x_range.factors = []\r\n z.x_range.factors = list(xxr)\r\n sourcepsqft2.data = ColumnDataSource(data=dict(x=list(xxr2), y=list(yyr2), desc=list(xxr), max=list(yyr))).data\r\n sourcepsqft.data = ColumnDataSource(data=dict(x=list(xxr), y=list(yyr), desc=list(xxr), max=list(yyr))).data\r\n yyy = ['${:.2f}'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]), '${:.2f}'.format(sum(list(yyr))/len(list(yyr))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]/(sum(list(yyr))/len(list(yyr)))),'${:.2f}'.format(sum(list(yyr2))/len(list(yyr2))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]/(sum(list(yyr2))/len(list(yyr2)))),\r\n '${:.2f}'.format(np.std(list(yyr))), '${:.2f}'.format(np.percentile(list(yyr), 25)),\r\n '{:.2f}%'.format(((source.data['y'][-1]/(source.data['y'][-2]+1))-1)*100)]\r\n xxx = ['{}'.format(select.value), 'National Average', 'Compared to National Average',\r\n '{} Average'.format(provs[select.value]), 'Compared To Provincial Average', 'Variance', 'Top 25%',\r\n '2016 vs 2017']\r\n smalltable.data = ColumnDataSource(data=dict(x=xxx, y=yyy)).data\r\n for att in range(len(xxr)):\r\n if xxr[att] == select.value:\r\n dde = att + 0.5\r\n span.location = dde\r\n if max(yyr) < 3:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0')\r\ndef updateyear(attr,old,new):\r\n global smalltable\r\n global xxx\r\n w.title.text = str(new)+ ' ' +str(select2.value) +\" Breakdown\"\r\n\r\n if select2.value != 'Total':\r\n sourcepie.data = makepie(xyz[str(new)][str(select.value)][str(select2.value)]).data\r\n ss, tt = sorting(xyz[str(new)][str(select.value)][str(select2.value)])\r\n else:\r\n sourcepie.data = makepie(data[str(new)][str(select.value)]).data\r\n ss, tt = sorting(data[str(new)][str(select.value)])\r\n ww = [x / max(tt) for x in tt]\r\n zz = [x / AREA[str(new)][select.value] for x in tt]\r\n bigtable.data = ColumnDataSource(data=dict(x=ss, y=tt, w=ww, z=zz)).data\r\n if selectprovince.value != 'All':\r\n z.title.text = str(new) + ' ' + str(selectprovince.value) + ' Portfolio ' + str(select2.value) + ' Per Total Leasable Area'\r\n xxr, yyr = sorting(make_stuff2(str(select2.value),[x for x in provs if provs[x] == str(selectprovince.value)]))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value), [x for x in provs if provs[x] == str(selectprovince.value)], str(new), 
select3.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])], str(new), select3.value))\r\n else:\r\n z.title.text = str(new) + ' Portfolio ' + str(select2.value) + ' Per Total Leasable Area'\r\n xxr, yyr = sorting(make_stuff2(str(select2.value), newoff))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value), newoff, str(new), select3.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(select2.value), [x for x in newoff if x in [x[0] for x in provs.items() if x[1] == str(provs[select.value])]]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(select2.value), [x for x in newoff if x in [x[0] for x in provs.items() if x[1] == str(provs[select.value])]], str(new), select3.value))\r\n z.x_range.factors = []\r\n z.x_range.factors = list(xxr)\r\n sourcepsqft2.data = ColumnDataSource(data=dict(x=list(xxr2), y=list(yyr2), desc=list(xxr), max=list(yyr))).data\r\n sourcepsqft.data = ColumnDataSource(data=dict(x=list(xxr), y=list(yyr), desc=list(xxr), max=list(yyr))).data\r\n yyy = ['${:.2f}'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]), '${:.2f}'.format(sum(list(yyr))/len(list(yyr))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]/(sum(list(yyr))/len(list(yyr)))),'${:.2f}'.format(sum(list(yyr2))/len(list(yyr2))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]/(sum(list(yyr2))/len(list(yyr2)))),\r\n '${:.2f}'.format(np.std(list(yyr))), '${:.2f}'.format(np.percentile(list(yyr), 25)),\r\n '{:.2f}%'.format(((source.data['y'][-1]/(source.data['y'][-2]+1))-1)*100)]\r\n xxx = ['{}'.format(select.value), 'National Average', 'Compared to National Average',\r\n '{} Average'.format(provs[select.value]), 'Compared To Provincial Average', 'Variance', 'Top 25%',\r\n '2016 vs 2017']\r\n smalltable.data = ColumnDataSource(data=dict(x=xxx, y=yyy)).data\r\n for att in range(len(xxr)):\r\n if xxr[att] == select.value:\r\n dde = att + 0.5\r\n span.location = dde\r\n if max(yyr) < 3:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0')\r\ndef updatedenominator(attr,old,new):\r\n global smalltable\r\n global xxx\r\n w.title.text = str(new)+ ' ' +str(select2.value) +\" Breakdown\"\r\n z.title.text = str(new) + ' Portfolio '+str(select2.value)+' Per Total Leasable Area'\r\n if select2.value != 'Total':\r\n sourcepie.data = makepie(xyz[str(new)][str(select.value)][str(select2.value)]).data\r\n ss, tt = sorting(xyz[str(new)][str(select.value)][str(select2.value)])\r\n else:\r\n sourcepie.data = makepie(data[str(new)][str(select.value)]).data\r\n ss, tt = sorting(data[str(new)][str(select.value)])\r\n ww = [x / max(tt) for x in tt]\r\n zz = [x / AREA[str(new)][select.value] for x in tt]\r\n bigtable.data = ColumnDataSource(data=dict(x=ss, y=tt, w=ww, z=zz)).data\r\n if radio_group.active == 2:\r\n xxr, yyr = sorting(make_stuff2(str(select2.value),newoff))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value), newoff, str(new), select3.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(select2.value), [x[0] for x in provs.items() if x[1] == str(provs[select.value])], 
str(new), select3.value))\r\n elif radio_group.active == 1:\r\n xxr, yyr = sorting(make_stuff2(str(select2.value), newoff))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value), newoff, str(new), select3.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(select2.value), [x for x in newoff if x in [x[0] for x in provs.items() if x[1] == str(provs[select.value])]]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(select2.value), [x for x in newoff if x in [x[0] for x in provs.items() if x[1] == str(provs[select.value])]], str(new), select3.value))\r\n elif radio_group.active == 0:\r\n xxr, yyr = sorting(make_stuff2(str(select2.value), newoff))\r\n xxr, yyr = sorting(make_stuff3(str(select2.value), newoff, str(new), select3.value))\r\n xxr2, yyr2 = sorting(make_stuff2(str(select2.value), [x for x in newoff if x in [x[0] for x in provs.items() if x[1] == str(provs[select.value])]]))\r\n xxr2, yyr2 = sorting(make_stuff3(str(select2.value), [x for x in newoff if x in [x[0] for x in provs.items() if x[1] == str(provs[select.value])]],str(new), select3.value))\r\n z.x_range.factors = []\r\n z.x_range.factors = list(xxr)\r\n sourcepsqft2.data = ColumnDataSource(data=dict(x=list(xxr2), y=list(yyr2), desc=list(xxr), max=list(yyr))).data\r\n sourcepsqft.data = ColumnDataSource(data=dict(x=list(xxr), y=list(yyr), desc=list(xxr), max=list(yyr))).data\r\n yyy = ['${:.2f}'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]), '${:.2f}'.format(sum(list(yyr))/len(list(yyr))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]/(sum(list(yyr))/len(list(yyr)))),'${:.2f}'.format(sum(list(yyr2))/len(list(yyr2))),\r\n '{:.2f}X'.format(make_stuff3(str(select2.value), newoff, selectyear.value, select3.value)[select.value]/(sum(list(yyr2))/len(list(yyr2)))),\r\n '${:.2f}'.format(np.std(list(yyr))), '${:.2f}'.format(np.percentile(list(yyr), 25)),\r\n '{:.2f}%'.format(((source.data['y'][-1]/(source.data['y'][-2]+1))-1)*100)]\r\n xxx = ['{}'.format(select.value), 'National Average', 'Compared to National Average',\r\n '{} Average'.format(provs[select.value]), 'Compared To Provincial Average', 'Variance', 'Top 25%',\r\n '2016 vs 2017']\r\n smalltable.data = ColumnDataSource(data=dict(x=xxx, y=yyy)).data\r\n for att in range(len(xxr)):\r\n if xxr[att] == select.value:\r\n dde = att + 0.5\r\n span.location = dde\r\n if max(yyr) < 3:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0.00')\r\n else:\r\n z.yaxis.formatter = NumeralTickFormatter(format='$0,0')\r\ndef getAreas2(y, year):\r\n pp = None\r\n new_area_group = []\r\n for x in AREA[str(year)].keys():\r\n if x in y:\r\n new_area_group.append(AREA[str(year)][x])\r\n pp = sum(new_area_group)\r\n return pp\r\ndef totaldict2(data):\r\n outer = {}\r\n\r\n for x in data['Site'].unique():\r\n inner = {}\r\n num = 0\r\n for y in data['Main Description'].unique():\r\n bb = {}\r\n for z in data['Description'].unique():\r\n for a in data.itertuples():\r\n list1 = []\r\n if a[3]==x and a[5]==y and a[8]==z:\r\n list1.append(a[9])\r\n bb.update({z:a[9]})\r\n ww=sum(bb.values())\r\n bb['Total']=ww\r\n num+=ww\r\n inner[y]=bb\r\n inner['Total']=num\r\n outer[x]= inner\r\n return outer\r\ndef office():\r\n pickle_in9 = open(\"off.pickle\", \"rb\")\r\n off = pickle.load(pickle_in9)\r\n off = list(off)\r\n office = pd.read_csv('office.csv')\r\n of = []\r\n for x in office.itertuples():\r\n # print(x[0],x[1])\r\n if x[2] == 'y':\r\n of.append(x[1])\r\n off = [x for x in off if x not in 
of]\r\n return off\r\ndef totalareas():\r\n dir = 'C:/Users/J_Ragbeer/PycharmProjects/opcost/data/'\r\n allareas = pd.read_csv('AREA.csv')\r\n p=[]\r\n for x in allareas['Building GLA']:\r\n y = str(x)\r\n if y != 'nan':\r\n p.append(int(x.replace(',','')))\r\n else:\r\n p.append(0)\r\n allareas['Building GLA']=pd.Series(p)\r\n allareas.fillna(method='ffill', inplace = True)\r\n allareas['Year']=allareas['Year'].astype('int')\r\n allareas['Building GLA']=allareas['Building GLA'].astype('int')\r\n r = allareas.groupby(['Building', 'Year'])['Building GLA'].mean()\r\n w=r.index.tolist()\r\n yearlist = []\r\n buildinglist = []\r\n for x in w:\r\n building, year = x\r\n buildinglist.append(building)\r\n yearlist.append(year)\r\n d = {'Building':buildinglist, 'Year':yearlist,'GLA':list(r)}\r\n df = pd.DataFrame(data = d)\r\n data={}\r\n\r\n def makeareas(df,yy):\r\n rr = {}\r\n for x in df.itertuples():\r\n if x[2] == yy:\r\n rr.update({x[1]:x[3]})\r\n return rr\r\n for x in years:\r\n data[str(x)]=makeareas(df, x)\r\n\r\n return data\r\ndef getAreas(y,year):\r\n pp = None\r\n new_area_group = []\r\n for x in AREA[str(year)].keys():\r\n if re.search(str(y),x, re.IGNORECASE):\r\n new_area_group.append(AREA[str(year)][x])\r\n pp = sum(new_area_group)\r\n return pp\r\ndef combineProps(year, group):\r\n dr = []\r\n rrr = None\r\n for key, value in data[str(year)].items():\r\n if re.search(str(group), key, re.IGNORECASE):\r\n dr.append(key)\r\n dd = [Counter(data[str(year)][x]) for x in dr]\r\n for y in range(1,len(dd)):\r\n for x in dd[0].keys():\r\n dd[0][str(x)] += dd[y][str(x)]\r\n rrr = dd[0]\r\n return rrr\r\ndef combinePropsComplex(year, group):\r\n dr = []\r\n rrr = None\r\n for key, value in data[str(year)].items():\r\n #if key.startswith(str(group)):\r\n # if re.search(str(group), key, re.IGNORECASE):\r\n if key in group:\r\n # print(key)\r\n dr.append(key)\r\n dd = [Counter(data[str(year)][x]) for x in dr]\r\n for y in range(1,len(dd)):\r\n for x in dd[0].keys():\r\n dd[0][str(x)] += dd[y][str(x)]\r\n rrr = dd[0]\r\n return rrr\r\ndef combineProps3new(complex,year):\r\n w = {}\r\n h=[]\r\n dd = [Counter(xyz[str(year)][x]) for x in complex]\r\n for y in list(sorted(xyz['2017']['Yorkdale Shopping Centre'].keys()))[:-2]+['Water and Sewage']:\r\n for x in range(1,len(dd)):\r\n\r\n dd[0][y]= dict(Counter(dd[0][y])+ Counter(dd[x][y]))\r\n w[y]= dict(dd[0][y])\r\n\r\n for y in list(sorted(xyz['2017']['Yorkdale Shopping Centre'].keys()))[:-2]+['Water and Sewage']:\r\n if not w[y]:\r\n w[y]['Total']=float(0)\r\n h.append(w[y]['Total'])\r\n\r\n w['Total'] = sum(h)\r\n return w\r\ndef combineProps2new(prop,p):\r\n dr = []\r\n w = {}\r\n h=[]\r\n for key, value in xyz[p].items():\r\n if re.search(str(prop),key,re.IGNORECASE):\r\n dr.append(key)\r\n dd = [Counter(xyz[p][x]) for x in dr]\r\n for y in list(sorted(xyz['2017']['Yorkdale Shopping Centre'].keys()))[:-2]+['Water and Sewage']:\r\n for x in range(1,len(dd)):\r\n dd[0][y]= dict(Counter(dd[0][y])+ Counter(dd[x][y]))\r\n w[y]= dict(dd[0][y])\r\n\r\n for y in list(sorted(xyz['2017']['Yorkdale Shopping Centre'].keys()))[:-2]+['Water and Sewage']:\r\n\r\n if not w[y]:\r\n w[y]['Total']=float(0)\r\n if prop == 'Yorkdale' and y == 'Recoverable Parking' and p == '2015':\r\n w[y]['Other - Rec Prkg'] = float(-2484)\r\n w[y]['Total'] = float(-2335)\r\n\r\n h.append(w[y]['Total'])\r\n\r\n w['Total'] = sum(h)\r\n # print(prop,w)\r\n return w\r\ndef switchclass():\r\n webbrowser.open(\"oxfordproperties.com\", new = 0)\r\nyears = range(2010,2019)\r\noff = 
office()\r\nAREA = totalareas()\r\nprovinces = pd.read_csv('provincesOffice.csv', header = None, names = ['Property', 'Prov'])\r\nprovs = {x[1]:x[2] for x in provinces.itertuples()}\r\ngroups = ['Citigroup Place', 'Royal Bank Plaza', 'Millennium Tower', 'MetroCentre']\r\ncansquare = ['2180 Yonge Street','2190 Yonge Street','2200 Yonge Street','2200 Yonge Street-Subway','Canada Square-Steam Plant','2210 Yonge Street']\r\ncantrusttower = ['TD Canada Trust Tower','CTT Common Area Costs','CT Twr Investmt Inc-Office-Adm','CT Tower Investment Inc-Retail']\r\nwpp = ['20 Bay Street','10 Bay Street','88 Queens Quay','WaterPark Pl-Underground Prkg.']\r\nECCOffice = ['Oxford Tower','TD Tower']\r\nRAC = ['111 Richmond St West','RAC Retail','RAC Prop Mgmt G and A','EY Tower','130 Adelaide St. West','120 Adelaide St West']\r\nDynamicFunds = ['20 Victoria St.','1 Adelaide St. East']\r\nBowValley = ['Bow Valley Sq 1 Co-Own', 'Bow Valley Sq 2 Co-Own', 'Bow Valley Sq 3 Co-Own', 'Bow Valley Sq 4 Co-Own',\r\n 'Bow Valley Square 1', 'Bow Valley Square 2', 'Bow Valley Square 3', 'Bow Valley Square 4']\r\nCentennialPlace = ['Centennial Place-Allocation', 'Centennial Place-Parking','Centennial Place-Alloc.Elim.','Centennial Allocation Centre', 'Centennial Place-Alloc.Elim.','Centennial Place-West Tower','Centennial Place-East Tower','Centennial Place-Retail']\r\npickle_in = open(\"budgetarea.pickle\",\"rb\")\r\nAREA['2018'] = pickle.load(pickle_in)\r\n\r\nstart = 2010\r\nend = 2017\r\nend2 = 2018\r\nsite = '1 Adelaide St. East'\r\ndesc = 'Total'\r\nsubdesc = 'Total'\r\n\r\n\r\npickle_in1 = open(\"xyz.pickle\",\"rb\")\r\nxyz = pickle.load(pickle_in1)\r\npickle_in2 = open(\"dict.pickle\",\"rb\")\r\ndata = pickle.load(pickle_in2)\r\npickle_in3 = open(\"sublist.pickle\",\"rb\")\r\nsublist = pickle.load(pickle_in3)\r\narray = [data['2017'].keys()]\r\nproperties=list(set(chain(*array)))\r\naccounts = list(data['2017']['88 Queens Quay'].keys())\r\nfor x in years:\r\n for each in properties:\r\n AREA[str(x)][each] = AREA[str(x)].get(each, -9999)\r\n\r\nfor y in years:\r\n for x in groups:\r\n AREA[str(y)][str(x)] = getAreas(str(x), str(y))\r\n # AREA[str(y)]['Bow Valley Square']= AREA[str(y)]['Bow Valley']\r\n # AREA[str(y)]['Centennial Place']= AREA[str(y)]['Centennial']\r\n\r\nfor y in years:\r\n AREA[str(y)]['ECC Office Complex'] = getAreas2(ECCOffice, str(y))\r\n AREA[str(y)]['RAC Complex'] = getAreas2(RAC, str(y))\r\n AREA[str(y)]['Dynamic Funds Tower Complex'] = getAreas2(DynamicFunds, str(y))\r\n AREA[str(y)]['WaterPark Place Complex'] = getAreas2(wpp, str(y))\r\n AREA[str(y)]['Canada Square Complex'] = getAreas2(cansquare, str(y))\r\n AREA[str(y)]['Canada Trust Tower Complex'] = getAreas2(cantrusttower, str(y))\r\n AREA[str(y)]['Bow Valley Square'] = getAreas2(BowValley, str(y))\r\n AREA[str(y)]['Centennial Place Complex'] = getAreas2(CentennialPlace, str(y))\r\n\r\nfor x in years:\r\n for y in groups:\r\n data[str(x)][str(y)] = combineProps(x, y)\r\nfor x in years:\r\n for y in groups:\r\n xyz[str(x)][str(y)] = combineProps2new(str(y), str(x))\r\nfor x in years:\r\n try:\r\n xyz[str(x)]['ECC Office Complex']= combineProps3new(ECCOffice,str(x))\r\n except Exception as e:\r\n pass\r\nfor x in years:\r\n try:\r\n xyz[str(x)]['Dynamic Funds Tower Complex'] = combineProps3new(DynamicFunds,str(x))\r\n except Exception as e:\r\n pass\r\nfor x in years:\r\n try:\r\n xyz[str(x)]['WaterPark Place Complex'] = combineProps3new(wpp,str(x))\r\n except Exception as e:\r\n pass\r\nfor x in years:\r\n try:\r\n 
xyz[str(x)]['Canada Square Complex'] = combineProps3new(cansquare, str(x))\r\n except Exception as e:\r\n pass\r\nfor x in years:\r\n try:\r\n xyz[str(x)]['Canada Trust Tower Complex'] = combineProps3new(cantrusttower, str(x))\r\n except Exception as e:\r\n pass\r\nfor x in years:\r\n try:\r\n xyz[str(x)]['RAC Complex'] = combineProps3new(RAC, str(x))\r\n except Exception as e:\r\n pass\r\nfor x in years:\r\n try:\r\n xyz[str(x)]['Centennial Place Complex'] = combineProps3new(CentennialPlace, str(x))\r\n except Exception as e:\r\n pass\r\nfor x in years:\r\n try:\r\n xyz[str(x)]['Bow Valley Square'] = combineProps3new(BowValley, str(x))\r\n except Exception as e:\r\n pass\r\n\r\nfor x in years:\r\n data[str(x)]['Dynamic Funds Tower Complex']=combinePropsComplex(x,DynamicFunds)\r\n data[str(x)]['WaterPark Place Complex'] = combinePropsComplex(x, wpp)\r\n data[str(x)]['Canada Square Complex'] = combinePropsComplex(x, cansquare)\r\n data[str(x)]['Canada Trust Tower Complex'] = combinePropsComplex(x, cantrusttower)\r\n data[str(x)]['ECC Office Complex'] = combinePropsComplex(x, ECCOffice)\r\n data[str(x)]['Bow Valley Square'] = combinePropsComplex(x, BowValley)\r\n data[str(x)]['Centennial Place Complex'] = combinePropsComplex(x, CentennialPlace)\r\n # data[str(x)]['Bow Valley Square']= data[str(x)]['Bow Valley']\r\n # data[str(x)]['Centennial Place']= data[str(x)]['Centennial']\r\n data[str(x)]['RAC Complex'] = combinePropsComplex(x, RAC)\r\n\r\nremoveprops = ['Bow Valley Sq 1 Co-Own', 'Bow Valley Sq 2 Co-Own', 'Bow Valley Sq 3 Co-Own', 'Bow Valley Sq 4 Co-Own',\r\n 'Bow Valley Square 1', 'Bow Valley Square 2', 'Bow Valley Square 3', 'Bow Valley Square 4',\r\n 'Centennial Place-Allocation', 'Centennial Place-Parking','Citigroup Place-Parking','RAC Prop Mgmt G and A',\r\n '2180 Yonge Street', '2190 Yonge Street', '2200 Yonge Street', '2200 Yonge Street-Subway',\r\n 'WaterPark Pl-Underground Prkg.','Centennial Allocation Centre', 'Centennial Place-Alloc.Elim.',\r\n '2210 Yonge Street','Canada Square-Steam Plant','CT Tower Investment Inc-Retail','Gen and Admin-Kingsway Garden',\r\n 'CT Twr Investmt Inc-Office-Adm', 'CTT Common Area Costs','Admin-DIX30 Theatre Holding LP',\r\n 'Royal Bank Plaza Admin.','Royal Bank Plaza - Retail','Royal Bank Plaza - North Tower','Royal Bank Plaza - South Tower',\r\n 'YSCHI - Yorkdale Place Admin','Upper Canada Mall Coown-Adm','Southcentre ORC Unique','Citigroup Place',\r\n 'Millennium Tower','Millennium Tower Co-Ownership', 'Millennium Tower Parkade','Metrocentre-Food Court',\r\n 'Centennial Place-Alloc.Elim.','MetroCentre-Wellington Tower','Metrocentre-Retail','MetroCentre-King Street Tower']\r\noff = office()\r\nnewoff = [x for x in off if x not in removeprops]\r\nnewoff = newoff + ['Les Promenades Gatineau-Office', 'Centennial Place Complex', 'Bow Valley Square'] + groups + ['RAC Complex', 'Dynamic Funds Tower Complex','WaterPark Place Complex','Canada Square Complex','Canada Trust Tower Complex','ECC Office Complex']\r\nnewoff = [x for x in newoff if x not in ['Centennial','Bow Valley']]\r\n\r\nemptyprop = {}\r\nemptyprop['Total'] = 0.0\r\nfor x in sublist:\r\n emptyprop[x] = {}\r\n emptyprop[x]['Total'] = 0.0\r\n for y in sublist[x]:\r\n emptyprop[x][y] = 0.0\r\n\r\nfor x in years:\r\n for y in newoff:\r\n if xyz[str(x)].get(y,None) == None:\r\n xyz[str(x)][str(y)] = emptyprop\r\n\r\nfor x in years:\r\n for y in newoff:\r\n data[str(x)][str(y)] = data[str(x)].get(y, {account: 0 for account in accounts})\r\n if data[str(x)][str(y)] == None:\r\n 
data[str(x)][str(y)] = {account:0 for account in accounts}\r\nfor x in years:\r\n for y in newoff:\r\n for z in sublist:\r\n for aa in sublist[z]:\r\n xyz[str(x)][str(y)][str(z)][str(aa)] = xyz[str(x)][str(y)][str(z)].get(aa, 0.0)\r\n\r\n#bokeh\r\nxx, yy = sorting(make_stuff3('Total',newoff, end))\r\nsourcepsqft = ColumnDataSource(data=dict(x=xx, y=yy, desc=xx, max = yy))\r\nxx2, yy2 = sorting(make_stuff3('Total',[x[0] for x in provs.items() if x[1] == 'Ontario'], end))\r\nsourcepsqft2 = ColumnDataSource(data=dict(x=xx2, y=yy2, desc=xx2, max = yy2))\r\n\r\nhoverline = HoverTool(tooltips=[\r\n (\"Year\", \"$x{0}\"),\r\n (\"Cost\", \"$y{($ 0.00 a)}\"),])\r\nhoverbar = HoverTool(tooltips=[\r\n (\"Property\", \"@desc\"),\r\n (\"Cost\", \"@max{($ 0.00 a)}\"),])\r\nhoverpie = HoverTool(tooltips=[\r\n (\"Operational Cost\", \"@category\"),\r\n (\"Percentage\", \"@percents{(0.0)}%\"),])\r\n\r\nsource = get_source(start, end, site, desc)\r\nsourceLine2 = get_sourcepsqft(start, end, site, desc)\r\nsource2018 = get_source(end, end2, site, desc)\r\nsourceLine22018 = get_sourcepsqft(end, end2, site, desc)\r\npctchange = ((source.data['y'][-1]/(source.data['y'][-2]+1))-1)*100\r\n\r\nopts = list(data['2017']['1 Adelaide St. East'].keys())\r\nopts.remove('Concierge')\r\ndenoms = ['RF','RP','RT','RK']\r\n\r\nbutton = Button(label=\"Retail Dashboard\", button_type=\"success\")\r\nbutton.on_click(switchclass)\r\nselect = Select(title='Property:', value=site, options=sorted(list(newoff)))\r\nselect.on_change('value', updateproperty)\r\nselect2 = Select(title='Operational Cost:', value=desc, options=sorted(opts))\r\nselect2.on_change('value', updateopcost)\r\nselect3 = Select(title='Operational Cost Breakdown:', value=desc, options=['Total'])\r\nselect3.on_change('value', updatesubopcost)\r\nselectyear = Select(title='Year:', value=str(end), options=[str(e) for e in range(2010,2019)])\r\nselectyear.on_change('value', updateyear)\r\nselectdenominator = Select(title='Denominator:', value='Total Leasable Area', options=[str(e) for e in range(2010,2019)])\r\nselectdenominator.on_change('value', updatedenominator)\r\nselectprovince = Select(title='Province:', value='All', options=['All','Alberta','British Columbia','Ontario','Quebec'])\r\nselectprovince.on_change('value', updateprovince)\r\nxxx = ['{}'.format(select.value), 'National Average','Compared to National Average','{} Average'.format(provs[select.value]),'Compared To Provincial Average', 'Variance', 'Top 25%', '2016 vs 2017']\r\n\r\nif select2.value != 'Total':\r\n sourcepie = makepie(xyz[str(selectyear.value)][str(select.value)][str(select2.value)])\r\n ss, tt = sorting(xyz[str(selectyear.value)][str(select.value)][str(select2.value)])\r\nelse:\r\n sourcepie = makepie(data[str(selectyear.value)][str(select.value)])\r\n ss, tt = sorting(data[str(selectyear.value)][str(select.value)])\r\n\r\np = figure(plot_width=800, plot_height=500,\r\n tools=[hoverline, BoxSelectTool(),BoxZoomTool(), ResetTool(),WheelZoomTool(), SaveTool(), PanTool()],\r\n title = select.value+\" Operational Costs\",\r\n x_axis_label = ' ',\r\n y_axis_label = \"Total Operational Costs\",toolbar_location=\"right\")\r\np.extra_y_ranges = {\"foo\": Range1d(start=0, end=max(sourceLine2.data['y'])*1.2)}\r\np.add_layout(LinearAxis(y_range_name=\"foo\", axis_label=''), 'right')\r\np.line('x', 'y', source = source, line_width=2.5, line_color='#E24A33', legend = 'Total Cost')\r\np.line('x', 'y', source = sourceLine2, line_width=2.5, line_color='#d3be1d',y_range_name=\"foo\", legend = 'Cost per 
SQFT')\r\np.circle('x', 'y', source=sourceLine2, radius=0.035, line_color='#d3be1d', fill_color=\"#d3be1d\",y_range_name=\"foo\")\r\np.circle('x', 'y', source=source, radius=0.035, line_color=\"#E24A33\", fill_color=\"red\")\r\n\r\np.line('x', 'y', source = source2018, line_width=2.5, line_color='#f16913', legend = '2018 Budget', line_alpha = 0.68)\r\np.line('x', 'y', source = sourceLine22018, line_width=2.5, line_color='#f16913',y_range_name=\"foo\", line_alpha = 0.68)\r\np.circle('x', 'y', source=sourceLine22018, radius=0.035, line_color='#f16913', fill_color=\"#f16913\",y_range_name=\"foo\")\r\np.circle('x', 'y', source=source2018, radius=0.035, line_color=\"#f16913\", fill_color=\"#f16913\")\r\np.background_fill_color='#E6E6E6'\r\np.yaxis.formatter = BasicTickFormatter(use_scientific=False)\r\np.yaxis.formatter = NumeralTickFormatter(format= '$0,0')\r\np.ygrid.grid_line_color = 'white'\r\np.xgrid.grid_line_color = 'white'\r\np.xaxis.ticker = [2010,2011,2012,2013,2014,2015,2016,2017,2018]\r\np.axis.minor_tick_line_alpha = 0\r\np.axis.axis_line_color='#E6E6E6'\r\np.axis.major_tick_in= -1\r\np.axis.axis_label_text_font_style= 'normal'\r\np.xaxis.axis_label_text_font_size= '10pt'\r\np.yaxis.axis_label_text_font_size= '11pt'\r\np.axis.major_label_text_font_size= '9pt'\r\np.axis.major_label_text_font_style='bold'\r\np.title.align = 'center'\r\np.title.text_font_size = '11pt'\r\np.toolbar.active_scroll = \"auto\"\r\np.legend.location = 'bottom_left'\r\n\r\nr = widgetbox(select, select2, select3, selectyear, selectprovince, button, width=300)\r\n\r\nw = figure(plot_width=500, plot_height=500,\r\n tools=[hoverpie, BoxSelectTool(),BoxZoomTool(), ResetTool(),WheelZoomTool(), SaveTool(), PanTool()],\r\n title = selectyear.value+\" \"+select2.value+\" Breakdown\", x_axis_label=\"\", y_axis_label=\"\", toolbar_location=\"right\")\r\nw.annular_wedge(x='x', y='y', inner_radius='ymin', outer_radius='ymax', direction=\"anticlock\",\r\n start_angle='starts', end_angle='ends', color='colors', source=sourcepie)\r\nw.axis.visible=False\r\nw.grid.visible=False\r\nw.title.align = 'center'\r\nw.title.text_font_size = '11pt'\r\nw.toolbar.active_scroll = \"auto\"\r\n\r\nz = figure(x_range = sourcepsqft.data['x'], plot_width=1300, plot_height=500,\r\n tools=[hoverbar, BoxSelectTool(), BoxZoomTool(), ResetTool(),WheelZoomTool(), SaveTool(), PanTool()],\r\n title = str(selectyear.value) + ' Portfolio '+str(select2.value)+' Per Total Leasable Area', y_axis_label=\"Cost\", toolbar_location=\"right\")\r\nz.vbar(x='x', top='y', source = sourcepsqft, width = 0.15)\r\ndd= 1\r\nfor a in range(len(xx)):\r\n if xx[a] == select.value:\r\n dd = a+0.5\r\nspan = Span(location=dd, dimension='height', line_color='#f16913', line_dash='solid', line_width=3, line_alpha=0.6)\r\nz.add_layout(span)\r\nz.title.align = 'center'\r\nz.title.text_font_size = '11pt'\r\nz.toolbar.active_scroll = \"auto\"\r\nz.background_fill_color = '#E6E6E6'\r\nz.yaxis.formatter = BasicTickFormatter(use_scientific=False)\r\nz.yaxis.formatter = NumeralTickFormatter(format = '$0,0')\r\nz.ygrid.grid_line_color = 'white'\r\nz.xgrid.visible = False\r\nz.xaxis.visible = False\r\nz.xgrid.grid_line_color = 'white'\r\nz.axis.minor_tick_line_alpha = 0\r\nz.axis.axis_line_color = '#E6E6E6'\r\nz.axis.major_tick_in = -1\r\nz.axis.axis_label_text_font_style = 'normal'\r\nz.axis.axis_label_text_font_size = '11pt'\r\nz.axis.major_label_text_font_size = '9pt'\r\nz.axis.major_label_text_font_style = 'bold'\r\n\r\nyyy = ['${:.2f}'.format(make_stuff3('Total', newoff, 
end)[select.value]),\r\n '${:.2f}'.format(np.mean(sourcepsqft.data['y'])),\r\n '{:.2f}X'.format(make_stuff3('Total',newoff, end)[select.value]/np.mean(sourcepsqft.data['y'])),\r\n '${:.2f}'.format(np.mean(sourcepsqft2.data['y'])),\r\n '{:.2f}X'.format(make_stuff3('Total',newoff, end)[select.value]/np.mean(sourcepsqft2.data['y'])),\r\n '${:.2f}'.format(np.std(sourcepsqft.data['y'])),'${:.2f}'.format(np.percentile(sourcepsqft.data['y'],25)),\r\n '{:.2f}%'.format(pctchange)]\r\n\r\nsmalltable = ColumnDataSource(data = dict(x = xxx,y = yyy))\r\ntable1 = DataTable(source = smalltable, columns = [TableColumn(field = 'x', title = ' ',formatter = StringFormatter(text_align = 'center'), width = 475), TableColumn(field = 'y', title = ' ',formatter = StringFormatter(text_align = 'center'))], width = 500, height = 500, index_position = None, sortable = False)\r\n\r\nww = [x/max(tt) for x in tt]\r\nzz = [x/AREA[selectyear.value][select.value] for x in tt]\r\nbigtable = ColumnDataSource(data = dict(x = ss,y = tt, w=ww,z=zz))\r\ntable2 = DataTable(source = bigtable, columns = [TableColumn(field = 'x', title = 'Account',formatter = StringFormatter(text_align = 'center'), width = 500), TableColumn(field = 'y', title = 'Cost',formatter = NumberFormatter(format=\"$0,0\")),TableColumn(field = 'z', title = '$ per SQFT',formatter = NumberFormatter(format = '$0.00')), TableColumn(field = 'w', title = 'Percent',formatter = NumberFormatter(format=\"0.0%\"))],width = 500, height = 500, index_position = None, sortable = False)\r\n\r\n# print([x for x in provs if provs[x] == 'Ontario'])\r\n# print([x for x in provs if provs[x] == 'Alberta'])\r\n# print([x for x in provs if provs[x] == 'Quebec'])\r\n# print('\\n', AREA)\r\nprint(off)\r\naa = row ([z,table1])\r\nrw = row([p, w, table2])\r\ncol = column([rw,aa])\r\ngrid = row([r, col])\r\ncurdoc().add_root(grid)\r\nshow(grid)\r\n\r\n","sub_path":"opcostsOFFICE.py","file_name":"opcostsOFFICE.py","file_ext":"py","file_size_in_byte":52303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"181885958","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# preProcessing.py\n# \n# Copyright 2020 Luiz Oliveira\n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \n\n\"\"\"\nMain module\n\nThis script analyses the output of simulations run on OpenFoam.\nThe analysis steps are performed by the modules in the bin folder.\n\"\"\"\n\nimport sys\nimport os\nimport shutil\nimport time\nstart_time = time.time()\n\n# Check for necessary directories\nif not os.path.exists('preTreatment'):\n    os.makedirs('preTreatment')\n    print(\"The directory preTreatment/ was created, please populate with the \"\n          \"desired csv files to be analysed.\")\n    sys.exit('The directory preTreatment/ did not exist.')\nelif not os.listdir('preTreatment'):\n    sys.exit('The directory preTreatment/ is empty.')\n    \n# Clear the previous results directories\nif os.path.exists('preTreatment/results'):\n    shutil.rmtree('preTreatment/results')\nos.makedirs('preTreatment/results')\nos.makedirs('preTreatment/results/Excel')\nos.makedirs('preTreatment/results/CSV')\nos.makedirs('preTreatment/results/Plot')\n\n# Define Global Variables\nH = 0.10\nU = 0.101\nW = 0.15\nL = 0.25\nY0 = 0.30\nX0 = 0.25\nRHO = 1e-6\n\n# Import CSV\nexec(open(\"bin/importCSV.py\").read())\nprint(\"\"\"Importing Done...\nElapsed Time %.3f s\\n\"\"\" %(time.time() - start_time))\n\n# Data Processing\ntry:\n    exec(open(\"bin/dataProcess.py\").read())\n    print(\"\"\"Processing Done...\nElapsed Time %.3f s\\n\"\"\" %(time.time() - start_time))\nexcept Exception:\n    print(\"\"\"No data was processed.\nThe script jumped into the next section: Mass Fitting\"\"\")\n    print(\"Elapsed Time %.3f s\\n\" %(time.time() - start_time))\n\n# Mass Fitting\ntry: \n    exec(open(\"bin/mass.py\").read())\n    print(\"\"\"Mass Fitting Done...\nElapsed Time %.3f s\\n\"\"\" %(time.time() - start_time))\nexcept Exception:\n    print(\"\"\"No mass data was processed.\nThe script jumped into the next section: Mixing Layer Thickness\"\"\")\n    print(\"Elapsed Time %.3f s\\n\" %(time.time() - start_time))\n    \n# Mixing Layer Thickness\ntry: \n    exec(open(\"bin/thickness.py\").read())\n    print(\"\"\"Mixing Layer Thickness Calculated...\nElapsed Time %.3f s\\n\"\"\" %(time.time() - start_time))\nexcept Exception:\n    print(\"\"\"No mixing layer thickness data was processed.\nThe script jumped into the next section: Plotting\"\"\")\n    print(\"Elapsed Time %.3f s\\n\" %(time.time() - start_time))\n    \n# Plot Data\ntry:\n    exec(open(\"bin/plot.py\").read())\n    print(\"\"\"Plotting Done...\nElapsed Time %.3f s\\n\"\"\" %(time.time() - start_time))\nexcept Exception:\n    print(\"No plotting was done.\\n\")\n\nprint(\"\"\"All Done...\nExecution Time %.3f seconds\"\"\" %(time.time() - start_time))\ndel start_time\n","sub_path":"Post Processing/preProcessing.py","file_name":"preProcessing.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"107556202","text":"from fractions import Fraction\r\n\r\nclass Matrix:\r\n    def __init__(self, a):\r\n        self.matr = [list(map(lambda x: Fraction(x, 1), a[i])) for i in range(len(a))]\r\n\r\n    def __str__(self):\r\n        return '\\n'.join(map(lambda x: '\\t'.join(map(str, x)), self.matr))\r\n\r\n    def size(self):\r\n        lenStr = 0\r\n        if len(self.matr) != 0:\r\n            lenStr = len(self.matr[0])\r\n        return (len(self.matr), lenStr)\r\n\r\n    def __add__(self, other):\r\n        if self.size()[0] != other.size()[0] or \\\r\n           self.size()[1] != other.size()[1]:\r\n            raise MatrixError(self, other)\r\n        newMatr = [[Fraction(0, 1)] * self.size()[1] for i in range(self.size()[0])]\r\n        for i in range(len(self.matr)):\r\n            for j in range(len(self.matr[i])):\r\n                newMatr[i][j] = self.matr[i][j] + other.matr[i][j]\r\n        return Matrix(newMatr)\r\n\r\n    def __mul__(self, x):\r\n        if isinstance(x, int) or isinstance(x, float) or isinstance(x, Fraction):\r\n            newMatr = [[Fraction(0, 1)] * self.size()[1] for i in range(self.size()[0])]\r\n            for i in range(len(self.matr)):\r\n                for j in range(len(self.matr[i])):\r\n                    newMatr[i][j] = self.matr[i][j] * x\r\n            return Matrix(newMatr)\r\n        elif isinstance(x, Matrix) and self.size()[1] == x.size()[0]:\r\n            newMatr = [[Fraction(0, 1)] * x.size()[1] for i in range(self.size()[0])]\r\n            for i in range(len(self.matr)):\r\n                for j in range(x.size()[1]):\r\n                    for k in range(self.size()[1]):\r\n                        newMatr[i][j] += self.matr[i][k] * x.matr[k][j]\r\n            return Matrix(newMatr)\r\n        else:\r\n            raise MatrixError(self, x)\r\n\r\n    __rmul__ = __mul__\r\n    \r\n    def inverse(self):  # compute the inverse via Gaussian elimination\r\n        n = self.size()[0]\r\n        newMatr = [list(self.matr[i]) for i in range(n)]  # a working copy of our matrix\r\n        theInverse = [[Fraction(0, 1)] * n for i in range(n)]  # identity matrix; at the end it will hold the inverse\r\n        for i in range(n):\r\n            theInverse[i][i] = Fraction(1, 1)  # fill its diagonal with ones\r\n        for i in range(n):  # this loop reduces the matrix to row echelon form; row i is called the current row\r\n            j = i\r\n            while j < n and newMatr[j][i] == 0:  # look for a nonzero element in the column\r\n                j += 1\r\n            if j >= n:  # no nonzero element found, so the matrix is not invertible\r\n                raise MatrixError(self)\r\n            tmp = list(newMatr[i])\r\n            newMatr[i] = list(newMatr[j])\r\n            newMatr[j] = list(tmp)\r\n            invTmp = list(theInverse[i])\r\n            theInverse[i] = list(theInverse[j])\r\n            theInverse[j] = list(invTmp)  # swap the row holding the nonzero element with the current row, so the pivot position of the current row is nonzero\r\n            for k in range(i + 1, n):  # this loop zeroes out the column below the current position\r\n                coeff = newMatr[k][i] / newMatr[i][i]\r\n                newMatr[k][i] = 0\r\n                for ind in range(i, n):  # this loop and the next subtract the current row, scaled by coeff, from row k\r\n                    newMatr[k][ind] -= coeff * newMatr[i][ind]\r\n                for ind in range(n):\r\n                    theInverse[k][ind] -= coeff * theInverse[i][ind]\r\n        for i in range(n):  # this loop makes every diagonal element equal to 1\r\n            coeff = newMatr[i][i]\r\n            for j in range(n):\r\n                newMatr[i][j] /= coeff\r\n                theInverse[i][j] /= coeff\r\n        for i in range(1, n):  # this loop makes every element above the diagonal equal to 0\r\n            for j in range(i):\r\n                coeff = newMatr[j][i]\r\n                for k in range(i, len(newMatr)):\r\n                    newMatr[j][k] -= coeff * newMatr[i][k]\r\n                for k in range(n):\r\n                    theInverse[j][k] -= coeff * theInverse[i][k]\r\n        return Matrix(theInverse)\r\n\r\n\r\nclass MatrixError(BaseException):\r\n    def __init__(self, m1, m2=None):  # m2 may be omitted, e.g. when a single matrix turns out to be non-invertible\r\n        self.matrix1 = m1\r\n        self.matrix2 = m2\r\n\r\nalpha = [[-2, -2, 3, 3], [1, -2, -1, -3], [-3, -1, 2, -3], [3, 2, -3, 3]]\r\nbeta = [[20, 11, -17, -16], [-12, -18, 5, -14], [-3, 16, -8, -8], [-20, -10, 12, -6]]\r\ngamma = [[-3, -1, -3, -1], [1, 3, -3, 1], [3, 2, -3, 2], [1, 1, -1, 2]]\r\ndelta = [[-1, -2, 1, 2], [2, 3, -4, -1], [-2, -2, 5, -1], [2, 2, -4, 1]]  # everything up to this point is input data\r\nalphaM = Matrix(alpha)\r\nbetaM = Matrix(beta)\r\ngammaM = Matrix(gamma)\r\ndeltaM = Matrix(delta)\r\nrightPart = alphaM.inverse() * deltaM\r\nrightPart = rightPart * gammaM.inverse()\r\nrightPart = rightPart.inverse()  # here both sides of the equation are raised to the power -1\r\nx = rightPart + ((-1) * betaM)\r\nprint(x)\r\n
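# --- editor's note: a minimal sanity check, not part of the original solution ---\r\n# Multiplying a matrix by its inverse should yield the identity matrix; alphaM is\r\n# already known to be invertible, because inverse() succeeded above.\r\nidentityCheck = alphaM * alphaM.inverse()\r\nprint(identityCheck)  # expect 1s on the diagonal and 0s elsewhere\r\n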
","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":5444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"339345123","text":"\"\"\"\nA command-line controlled coffee maker.\n\"\"\"\n\nimport sys\nimport string\nimport os\n\n\"\"\"\nImplement the coffee maker's commands. Interact with the user via stdin and print to stdout.\n\nRequirements:\n    - use functions\n    - use __main__ code block\n    - access and modify dicts and/or lists\n    - use at least once some string formatting (e.g. functions such as strip(), lower(),\n      format()) and types of printing (e.g. \"%s %s\" % tuple([\"a\", \"b\"]) prints \"a b\")\n    - BONUS: read the coffee recipes from a file, put the file-handling code in another module\n      and import it (see the recipes/ folder)\n\nThere's a section in the lab with syntax and examples for each requirement.\n\nFeel free to define more commands, other coffee types, more resources if you'd like and have time.\n\"\"\"\n\n\"\"\"\nTips:\n* Start by showing a message to the user to enter a command, remove our initial messages\n* Keep types of available coffees in a data structure such as a list or dict,\ne.g. a dict with coffee name as a key and another dict with resource mappings (resource:percent)\nas value\n\"\"\"\n\n# Commands\nEXIT = \"exit\"\nLIST_COFFEES = \"list\"\nMAKE_COFFEE = \"make\" #!!! when making coffee you must first check that you have enough resources!\nHELP = \"help\"\nREFILL = \"refill\"\nRESOURCE_STATUS = \"status\"\ncommands = [EXIT, LIST_COFFEES, MAKE_COFFEE, REFILL, RESOURCE_STATUS, HELP]\n\n# Coffee examples\nESPRESSO = \"espresso\"\nAMERICANO = \"americano\"\nCAPPUCCINO = \"cappuccino\"\n\n# Resources examples\nWATER = \"water\"\nCOFFEE = \"coffee\"\nMILK = \"milk\"\n\n# Coffee maker's resources - the values represent the fill percents\nRESOURCES = {WATER: 100, COFFEE: 100, MILK: 100}\n\n\"\"\"\nExample result/interactions:\n\nI'm a smart coffee maker\nEnter command:\nlist\namericano, cappuccino, espresso\nEnter command:\nstatus\nwater: 100%\ncoffee: 100%\nmilk: 100%\nEnter command:\nmake\nWhich coffee?\nespresso\nHere's your espresso!\nEnter command:\nrefill\nWhich resource? Type 'all' for refilling everything\nwater\nwater: 100%\ncoffee: 90%\nmilk: 100%\nEnter command:\nexit\n\"\"\"\n\n# print(\"I'm a simple coffee maker\")\n# print(\"Press enter\")\n# sys.stdin.readline()\n# print(\"Teach me how to make coffee...please...\")\nif __name__ == \"__main__\":\n    print(\"I'm a smart coffee maker\")\n    while True:\n        print(\"Enter command:\")\n        command = sys.stdin.readline().strip().lower()\n        if command not in commands:\n            print(\"not supported\")\n        else:\n            if command == LIST_COFFEES:\n                files = os.listdir(\"recipes/\")\n                for file in files:\n                    print(file[:-4])\n            elif command == MAKE_COFFEE:\n                print(\"Which coffee?\")\n                available_types = [f[:-4] for f in os.listdir(\"recipes/\")]\n                coffee_type = sys.stdin.readline().strip().lower()\n                if coffee_type not in available_types:\n                    print(\"Choose from: \")\n                    for t in available_types:\n                        print(t)\n                else:\n                    # each recipe file appears to hold a title line followed by resource=amount lines\n                    with open(\"recipes/\" + coffee_type + \".txt\", \"r\") as f:\n                        file_content = f.readlines()\n                    not_enough = False\n                    resource_copy = dict(RESOURCES)  # take a real copy so we can roll back on failure\n                    for line in file_content[1:]:\n                        usage_map = line.strip().split(\"=\")\n                        value = int(usage_map[1])\n                        if RESOURCES[usage_map[0]] >= value:\n                            RESOURCES[usage_map[0]] = RESOURCES[usage_map[0]] - value\n                        else:\n                            not_enough = True\n                            print(\"Please refill %s\" % usage_map[0])\n                    if not_enough:\n                        RESOURCES = resource_copy  # roll back the partial deductions\n                    else:\n                        print(\"Here's your %s\" % coffee_type)\n            elif command == REFILL:\n                print(\"Which resource? Type 'all' for refilling everything\")\n                resource = sys.stdin.readline().strip().lower()\n                if resource in RESOURCES.keys():\n                    RESOURCES[resource] = 100\n                    for k, v in RESOURCES.items():\n                        print(\"%s: %s%%\" % (k, v))\n                elif resource == \"all\":\n                    for k in RESOURCES.keys():\n                        RESOURCES[k] = 100\n                        print(\"%s: %s%%\" % (k, RESOURCES[k]))\n                else:\n                    print(\"Not a valid resource\")\n            elif command == RESOURCE_STATUS:\n                # status must only report the levels, not refill them\n                for k, v in RESOURCES.items():\n                    print(\"%s: %s%%\" % (k, v))\n            elif command == HELP:\n                for c in commands:\n                    print(c)\n            elif command == EXIT:\n                break\n","sub_path":"lab1-skel/coffee_maker.py","file_name":"coffee_maker.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"565199511","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nfrom PIL import Image\n\n#https://www.digitalocean.com/community/tutorials/how-to-build-a-neural-network-to-recognize-handwritten-digits-with-tensorflow\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True) # y labels are oh-encoded\n\n# number of example data points\nn_train = mnist.train.num_examples # 55,000\nn_validation = mnist.validation.num_examples # 5000\nn_test = mnist.test.num_examples # 10,000\n\n# Define Layers of neural network\nn_input = 784 # input layer (28x28 pixels)\nn_hidden1 = 512 # 1st hidden layer\nn_hidden2 = 256 # 2nd hidden layer\nn_hidden3 = 128 # 3rd hidden layer\nn_output = 10 # output layer (0-9 digits)\n\n# Define Hyperparameters\nlearning_rate = 1e-4\nn_iterations = 1000\nbatch_size = 128\ndropout = 0.5\n\n# define 3 tensors as placeholders\nX = tf.placeholder(\"float\", [None, n_input]) # feed in a None (unknown) amount of n_input (784) pixel images\nY = tf.placeholder(\"float\", [None, n_output]) # feed out unknown amount of n_output (10) possible outputs\nkeep_prob = tf.placeholder(tf.float32) # we initialize keep_prob as a placeholder so we can use it with a dropout rate\n                                       # of 0.5 now, and later 1.0 when we test\n\n# weights are 
randomly selected from a truncated normal distribution (this tends to train better than a naive initialization)\nweights = {\n    'w1': tf.Variable(tf.truncated_normal([n_input, n_hidden1], stddev=0.1)),\n    'w2': tf.Variable(tf.truncated_normal([n_hidden1, n_hidden2], stddev=0.1)),\n    'w3': tf.Variable(tf.truncated_normal([n_hidden2, n_hidden3], stddev=0.1)),\n    'out': tf.Variable(tf.truncated_normal([n_hidden3, n_output], stddev=0.1)),\n}\n\n# for biases, we use a constant rather than a random number, to make sure that tensors actually activate during the initial\n# training iterations\nbiases = {\n    'b1': tf.Variable(tf.constant(0.1, shape=[n_hidden1])),\n    'b2': tf.Variable(tf.constant(0.1, shape=[n_hidden2])),\n    'b3': tf.Variable(tf.constant(0.1, shape=[n_hidden3])),\n    'out': tf.Variable(tf.constant(0.1, shape=[n_output]))\n}\n\n#Each hidden layer will execute matrix multiplication on the previous layer’s outputs and the current layer’s weights,\n# and add the bias to these values. At the last hidden layer, we will apply a dropout operation using our keep_prob\n# value of 0.5.\nlayer_1 = tf.add(tf.matmul(X, weights['w1']), biases['b1'])\nlayer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])\nlayer_3 = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])\nlayer_drop = tf.nn.dropout(layer_3, keep_prob)\noutput_layer = tf.matmul(layer_drop, weights['out']) + biases['out'] # feed the dropout-regularized layer forward, as described above\n\n\n# define the loss function to optimize\ncross_entropy = tf.reduce_mean( #cross_entropy is a popular tensor-flow loss function\n    tf.nn.softmax_cross_entropy_with_logits(\n        labels=Y, logits=output_layer\n    ))\ntrain_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy) #Adam optimizer is a type of gradient-descent optimizer\n                                                                           # that can minimize the loss function\n\n# define what is correct vs incorrect, and what is accuracy\ncorrect_pred = tf.equal(tf.argmax(output_layer, 1), tf.argmax(Y, 1)) # see if training guess is equal to stored value (1 or 0)\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # get % accurate by averaging the booleans\n\n#initialize training session\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n\n\n# train on mini batches\n# We use mini-batches of images rather than feeding them through individually to speed up the training process and\n# allow the network to see a number of different examples before updating the parameters.\nfor i in range(n_iterations):\n    batch_x, batch_y = mnist.train.next_batch(batch_size)\n    sess.run(train_step, feed_dict={\n        X: batch_x, Y: batch_y, keep_prob: dropout\n    })\n\n    # print loss and accuracy (per minibatch)\n    if i % 100 == 0:\n        minibatch_loss, minibatch_accuracy = sess.run(\n            [cross_entropy, accuracy],\n            feed_dict={X: batch_x, Y: batch_y, keep_prob: 1.0}\n        )\n        print(\n            \"Iteration\",\n            str(i),\n            \"\\t| Loss =\",\n            str(minibatch_loss),\n            \"\\t| Accuracy =\",\n            str(minibatch_accuracy)\n        )\n\n\n\n# run on TEST images\ntest_accuracy = sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1.0})\nprint(\"\\nAccuracy on test set:\", test_accuracy)\n\n\n\n# try our own image\npath = r\"C:\\Users\\a.ibele\\PycharmProjects\\tensorflow\\test_img.png\"\nimg = np.invert(Image.open(path).convert('L')).ravel() # grayscale and invert, to match MNIST's white-on-black digits\n\nprediction = sess.run(tf.argmax(output_layer, 1), feed_dict={X: [img]})\nprint(\"Prediction for test image:\", 
np.squeeze(prediction))","sub_path":"tensorflow/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"621596809","text":"import webbrowser\n\n\nclass Movie():\n    \"\"\"Holds basic information about a movie.\"\"\"\n    VALID_RATINGS = [\"G\", \"PG\", \"PG-13\", \"R\"]\n\n    def __init__(\n        self,\n        movie_title,\n        movie_storyline,\n        poster_image,\n        trailer_youtube\n    ):\n        self.title = movie_title\n        self.storyline = movie_storyline\n        self.poster_image_url = poster_image\n        self.trailer_youtube_url = trailer_youtube\n        self.rating = 'G'\n\n    def show_trailer(self):\n        webbrowser.open(self.trailer_youtube_url)\n\n    def __getitem__(self, key):\n        return self.__dict__[key]\n
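\n# --- editor's sketch, not part of the original file: example usage ---\n# (the URLs are hypothetical placeholders; left commented out so that importing\n# this module stays free of side effects such as opening a browser)\n# toy = Movie(\"Toy Story\", \"A story of a boy and his toys\",\n#             \"http://example.com/poster.jpg\", \"http://example.com/trailer\")\n# toy.show_trailer()\n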
","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"6372702","text":"\"\"\"\r\nCarND-Behavioral-Cloning-P3 Project\r\nPhilip Lee 6/27/18\r\n\r\nUdacity CarND: ami-c4c4e3a4\r\n\r\nAWS CHECKLIST\r\n1. CREATE AWS INSTANCE\r\n2. LOG INTO INSTANCE (carnd, carnd)\r\n3. PIP INSTALL OPENCV-PYTHON\r\n4. PIP INSTALL TENSORFLOW\r\n5. PIP INSTALL KERAS==1.2.1\r\n5. ZIP EVERYTHING UP AND UPLOAD INTO AWS INSTANCE (scp data.zip carnd@X.X.X.X:/home/carnd/CarND-Behavioral-Cloning-P3/)\r\n6. RUN CLONE.PY\r\n7. DOWNLOAD MODEL BACK FROM AWS INSTANCE (scp carnd@X.X.X.X:/home/carnd/CarND-Behavioral-Cloning-P3/model.h5 .)\r\n8. RUN PYTHON DRIVE.PY MODEL.H5\r\n9. DON'T CRASH INTO THINGS\r\n\"\"\"\r\n\r\n\r\nimport csv\r\nimport cv2\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nlines = []\r\nwith open('./data/driving_log.csv') as csvfile:\r\n\treader = csv.reader(csvfile)\r\n\tfor line in reader:\r\n\t\tlines.append(line)\r\n\r\ncorrection = 0.1\r\nimages = []\r\nmeasurements = []\r\nfor line in lines:\r\n\tfor i in range(3):\r\n\t\tsource_path = line[i]\r\n\t\tfilename = source_path.split('\\\\')[-1]\r\n\t\tcurrent_path = './data/IMG/' + filename\r\n\r\n\t\t#INTERPRET AND ADD REGULAR ORIENTATION IMAGE\r\n\t\timage = cv2.imread(current_path)\r\n\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n\t\timages.append(image)\r\n\t\t#INTERPRET AND ADD FLIPPED ORIENTATION IMAGE\r\n\t\timage_flipped = np.fliplr(image)\r\n\t\timages.append(image_flipped)\r\n\r\n\t#ADD CENTER MEASUREMENTS\r\n\tmeasurement = float(line[3])\r\n\tmeasurements.append(measurement)\r\n\tmeasurements.append(-measurement)#FLIPPED MEASUREMENTS\r\n\t\r\n\tmeasurements.append(measurement+correction)#LEFT IMAGE WITH STEERING CORRECTION OFFSET\r\n\tmeasurements.append(-(measurement+correction))#FLIPPED LEFT IMAGE WITH STEERING CORRECTION OFFSET\r\n\t\r\n\tmeasurements.append(measurement-correction)#RIGHT IMAGE WITH STEERING CORRECTION OFFSET\r\n\tmeasurements.append(-(measurement-correction))#FLIPPED RIGHT IMAGE WITH STEERING CORRECTION OFFSET\r\n\r\n\t\r\n\t\r\n\t\r\n#print(filename)\t\r\n\t\r\nX_train = np.array(images)\r\ny_train = np.array(measurements)\r\n#print(X_train[0])\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Flatten, Dense, Lambda, Dropout\r\nfrom keras.layers.convolutional import Cropping2D, Convolution2D\r\nfrom keras.layers.normalization import BatchNormalization\r\n\r\n\"\"\"OLD MODEL\r\nmodel = Sequential()\r\nmodel.add(Lambda(lambda X_train: (X_train / 255.0) - 0.5, input_shape=(160,320,3)))\r\nmodel.add(Flatten(input_shape=(160,320,3)))\r\nmodel.add(Dense(1))\r\n\"\"\"\r\n\r\n#NVIDIA MODEL\r\nmodel = Sequential()\r\nmodel.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape = (160,320,3)))\r\nmodel.add(Cropping2D(cropping=((70,25),(0,0))))\r\nmodel.add(Convolution2D(24,5,5, subsample=(2,2), activation=\"relu\"))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Convolution2D(36,5,5, subsample=(2,2), activation=\"relu\"))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Convolution2D(48,5,5, subsample=(2,2), activation=\"relu\"))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Convolution2D(64,3,3, activation=\"relu\"))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Convolution2D(64,3,3, activation=\"relu\"))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(100))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(50))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(10))\r\nmodel.add(Dense(1))\r\n\r\nmodel.compile(loss='mse', optimizer='adam')\r\nmodel.fit(X_train, y_train, verbose=1, validation_split = 0.2, shuffle = True, nb_epoch = 5)\r\n\r\nmodel.save('model.h5')\r\n\r\n\r\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"357981728","text":"\n# coding: utf-8\n\n# ## Runtime environment\n# python 3.6.3\n# \n# Anaconda custom (64-bit)\n# \n# win10\n\n# In[1]:\n\n\nimport numpy as np # matrix operations\nimport matplotlib.pyplot as plt # plotting\nfrom PIL import Image # displaying images\nimport struct # parsing the dataset\n\n\n# ## Task definition\n# \n# handwritten digits recognition\n# \n# recognize handwritten digits with a fully connected neural network\n# \n# add regularization and dropout, and use random mini batches to speed up training\n\n# ## From files to data\n# \n# here the files are converted into numpy arrays\n# \n# ### Extracting images and labels\n# the data is obtained as numpy.ndarray with the following shapes\n# \n# ```python\n# trainImage.shape = (60000,28,28)\n# trainLabel.shape = (60000,1)\n# testImage.shape = (10000, 28, 28)\n# testLabel.shape = (10000, 1)\n# ```\n\n# In[2]:\n\n\nwith open('train-images.idx3-ubyte','rb') as f:\n    trainImage = f.read()\nwith open('train-labels.idx1-ubyte','rb') as f:\n    trainLabel = f.read()\nwith open('t10k-images.idx3-ubyte','rb') as f:\n    testImage = f.read()\nwith open('t10k-labels.idx1-ubyte','rb') as f:\n    testLabel = f.read()\n\n# API : struct.unpack(fmt,bytes)\n\n# bytes -> (int,int,int,int,numpy.ndarray[])\n# takes the raw binary data of the dataset and returns an array of shape (60000,28,28)\ndef getImageFromMNIST(imageBytes):\n    assert(type(imageBytes) is bytes),'parameter is no bytes'\n    magicNum, numOfImages = struct.unpack('>2i',imageBytes[:8]) # read the magic number (unused) and the number of images\n    rows, columns = struct.unpack('>2i',imageBytes[8:16]) # read the number of rows and columns\n    imageSize = rows * columns # rows * columns = size of one image\n    # read all the image data: imageSize * numOfImages unsigned bytes in total\n    imageTuple = struct.unpack('>'+str(imageSize * numOfImages)+'B',imageBytes[16:])\n    imageArray = np.array(imageTuple) # the dtype is the default int\n    return imageArray.reshape(numOfImages,rows,columns)\n\ndef getLabelFromMNIST(labelBytes):\n    assert(type(labelBytes) is bytes),'parameter is no bytes'\n    magicNum, numOfLabels = struct.unpack('>2i',labelBytes[:8]) # read the magic number (unused) and the number of labels\n    # each label is one unsigned byte; read all labels at once\n    labelSize = 1 * numOfLabels\n    labelTuple = struct.unpack('>'+str(labelSize)+'B',labelBytes[8:])\n    labelArray = np.array(labelTuple) \n    return labelArray.reshape(labelSize,1)\n\ntrainImage = getImageFromMNIST(trainImage)\ntrainLabel = getLabelFromMNIST(trainLabel)\ntestImage = getImageFromMNIST(testImage)\ntestLabel = 
getLabelFromMNIST(testLabel)\n\nprint('trainImage.shape:',trainImage.shape)\nprint('trainLabel.shape:',trainLabel.shape)\nprint('testImage.shape:',testImage.shape)\nprint('testLabel.shape:',testLabel.shape)\n\n\n# ## Input data definition: trainImage, trainLabel, testImage, testLabel\n# \n# ### Preprocessing the data\n# $28 \\times 28 = 784$\n# \n# **the images are divided by 255 so that the values fall in the interval [0,1]**\n# \n# $$\n# value_i = \\frac {value_i} {\\max(value_i)}\n# $$\n# \n# after processing,\n# ```python\n# trainImage.shape = (784, 60000) # (60000, 28, 28) -> (784, 60000)\n# trainLabel.shape = (10, 60000) # (60000,1) -> (10, 60000)\n# testImage.shape = (784, 10000) # (10000, 28, 28) -> (784, 10000)\n# testLabel.shape = (10, 10000) # (10000, 1) -> (10, 10000)\n# ```\n# \n# #### Meaning of the data\n# \n# as the English names suggest, train is the training set, test is the test set, Image is the image data, Label is the image label\n# ```python\n# trainImage[:,i] yields one column of shape (784,), representing one image\n# testLabel[:,i] yields one column of shape (10,), representing one label\n# ```\n\n# In[3]:\n\n\n# (number, 28, 28) -> (784, number)\ndef changeImage(image):\n    image = image.reshape(-1,28*28)\n    image = image.T # transpose, so that each image becomes a column\n    image = image.astype(np.float16) # cast to numpy.float16\n    return image / 255.0 # normalize to [0,1] to make training easier\n\n# (number, 1) -> (10, number)\ndef changeLabel(label):\n    assert(label.shape[1] == 1)\n    column = np.arange(10) # get 0~9, shape = (10,)\n    column = column.reshape(10,1) # to column\n    return label.T == column # use broadcasting to generate True or False label\n    \ntrainImage = changeImage(trainImage)\ntrainLabel = changeLabel(trainLabel)\ntestImage = changeImage(testImage)\ntestLabel = changeLabel(testLabel)\n\nprint('trainImage.shape:',trainImage.shape)\nprint('trainLabel.shape:',trainLabel.shape)\nprint('testImage.shape:',testImage.shape)\nprint('testLabel.shape:',testLabel.shape)\n\n\n# ## Method description\n# \n# ### Activation functions\n# \n# all hidden layers use the ReLU activation; the output layer uses softmax\n# \n# ### Loss function (using cross-entropy) (Extra!)\n# \n# \n# \n# $$\n# \\text{for each sample, using cross-entropy: } Lost = - \\sum _{j=0} ^{9} y_j \\ln a_j\n# $$\n# \n# * $y_j$ : the true value, 0 or 1\n# * $\\ln a_j$ : log of the activation (predicted) value, a decimal\n# \n# ### L2 regularization\n# \n# $$\n# Cost = \\sum _{i=1} ^{m} Lost_i + \\frac {\\lambda} {2} \\sum _ W \\Vert {W} \\Vert ^2\n# $$\n# \n# * $\\lambda$ : the L2 regularization coefficient\n# * W : the weight matrices between network layers\n# \n# \n# ### dropout (Extra!)\n# \n# there are 2 dropouts\n# \n# * dropout on the hidden layers\n# * dropout on the input layer\n# \n# ### random mini batch (Extra!)\n# \n# random mini batches are used to speed up training\n# \n# the batch size is tunable; a batch size of 128 is used here\n# \n# i.e. in every iteration, 128 samples and their corresponding labels are drawn at random from the training set for forward propagation, backpropagation and the parameter update\n# \n# ### Load the self-implemented neural network module\n\n# In[4]:\n\n\nimport os\nimport importlib # allows re-importing a module\nimport NN # the hand-written neural network module\n\n\n# ### Initializing the network\n# \n# #### Layer count and sizes: see the code in the cell below\n# \n# #### w and b\n# \n# $w = randn(L_i, L_{i-1}) \\times \\sqrt{\\frac 2 {L_{i-1}}}$\n# \n# $b = 0, shape = (L_i, 1)$\n# \n# * $L_i$ : the number of nodes in layer i\n# \n# the corresponding code is\n# \n# ```python\n# self.parameters['W'+str(i)] = np.random.randn(layers[i],layers[i-1]) * np.sqrt(2 / layers[i-1])\n# self.parameters['b'+str(i)] = np.zeros((layers[i],1))\n# ```\n# \n# #### L2 regularization coefficient $\\lambda = 0.001$\n# \n# #### dropout\n# \n# * dropout on the hidden layers: code below\n# * dropout on the input layer: code below\n# \n# #### mini batch size : 128\n# \n# #### Learning rate\n# \n# constant learning rate, but training can be paused, the rate changed, and training resumed\n# \n# the learning rate and iteration count are controlled manually\n# \n# the individual training runs are combined\n\n# In[5]:\n\n\nnp.random.seed(1) # fix the random seed to make bugs reproducible\ndata = { # training data\n'trainX':trainImage,\n'trainY':trainLabel\n}\n\nlayers = [784,500, 300, 200, 10] # 784 is the input size; layers[i] is the number of nodes in layer i\nprint('layers info:',layers)\n\nhy = {\n    'open-dropout': True,\n    'dropout': 0.8,\n    'dropout-input': 0.9\n}\n
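\n# --- editor's note: the NN module is not shown here; purely as an illustration (an\n# assumption, not the module's actual code), a random mini batch of 128 samples as\n# described above could be drawn like this:\n#     idx = np.random.choice(trainImage.shape[1], 128, replace=False)\n#     batchX, batchY = trainImage[:, idx], trainLabel[:, idx]\n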
\n#######################################################\n\nrelu = lambda z:np.maximum(0.01 * z,z) # leaky ReLU activation (slope 0.01 for negative z)\nrelu_deriv = lambda z,a:np.where(z > 0,1.0,0.01) # derivative of the leaky ReLU\n\ndef lostFunc(A,Y):\n    assert(A.shape == Y.shape),'A.shape != Y.shape'\n    # A and Y are matrices, but we just want to operate on columns, so use axis = 0\n    ylna = np.multiply(Y,np.log(A+1e-10)) # add 1e-10 to avoid np.log(0)\n    return -np.sum(ylna,axis = 0,keepdims=True) # remember to add the minus sign \"-\"\n    \ndef softmax(Z): # Z is a column vector, but we need to handle when Z is a matrix, use axis = 0\n    maxNumber = np.max(Z,axis = 0,keepdims=True)\n    assert(Z.shape[1] == maxNumber.shape[1])\n    Z -= maxNumber\n    Zexp = np.exp(Z)\n    return Zexp / np.sum(Zexp,axis = 0,keepdims=True)\n\ndef softmax_deriv(Z,A,Y): # derivative of softmax combined with the cross-entropy loss\n    assert(A.shape == Y.shape),'A.shape is not same as Y.shape'\n    return A - Y\n\n\n\ndef getFunction(layers): # initialize the function dict\n    function = {\n        'activation':{},\n        'derivative':{},\n        'lostFunction':lostFunc, # lambda a,y:np.sum(np.multiply(-y,np.log(a)),axis = 0), # (AL,Y) -> Lost(AL,Y)\n        'predictFunction':lambda A:(A,A>=np.max(A,axis = 0)), # (A,preA)\n        'accuracyFunction':lambda A,Y:1.0/Y.shape[1] * np.sum((np.sum(A==Y,axis = 0,keepdims=True) == 10))\n    }\n    L = len(layers) - 1\n    for i in range(1,L):\n        function['activation'][i] = relu\n        function['derivative'][i] = relu_deriv\n    function['activation'][L] = softmax\n    function['derivative'][L] = lambda Z,A,Y:softmax_deriv(Z,A,Y)\n    return function\n\nfunction = getFunction(layers)\n\nimportlib.reload(NN) # re-import the module, so that edits are picked up after changes\nmyNN = NN.NN(data, layers, function, hy) # initialize the network!!!!!!\n\n\n# #### Start training\n# \n# the learning rate `learningRate` is lowered in steps\n# \n# batchSize = 128\n# \n# the iteration count differs per run\n\n# In[6]:\n\n\n\ncosts = myNN.miniBatchRandom(learningRate=0.7,batchSize=128,batchTimes=100,getCost=True)\ncosts = costs + myNN.miniBatchRandom(learningRate=0.4,batchSize=128,batchTimes=100,getCost=True)\ncosts = costs + myNN.miniBatchRandom(learningRate=0.1,batchSize=128,batchTimes=100,getCost=True)\ncosts = costs + myNN.miniBatchRandom(learningRate=0.02,batchSize=128,batchTimes=100,getCost=True)\ncosts = costs + myNN.miniBatchRandom(learningRate=0.0001,batchSize=128,batchTimes=200,getCost=True)\ncosts = costs + myNN.miniBatchRandom(learningRate=0.000001,batchSize=128,batchTimes=300,getCost=True)\ncosts = costs + myNN.miniBatchRandom(learningRate=0.0000001,batchSize=128,batchTimes=300,getCost=True)\n\n# plot the cost curve\n\nplt.plot(costs)\nplt.title('costs')\nplt.xlabel(\"iteration times\")\nplt.ylabel(\"cost\")\nplt.show()\n\n# remove the dropout on the input when predicting\nif \"dropout-input\" in myNN.hyperParameters: \n    print('delete dropout in input')\n    myNN.hyperParameters.pop('dropout-input')\n\ntrainPre,trainPreBool = myNN.predict(X=trainImage)\nprint('train accuracy:',myNN.accuracy(trainPreBool,trainLabel))\n\ntestPre,testPreBool = myNN.predict(X=testImage)\nprint('test 1~10000 accuracy:',myNN.accuracy(testPreBool,testLabel))\n\ntestPre,testPreBool = myNN.predict(X=testImage[:,:5000])\nprint('test 1~5000 accuracy:',myNN.accuracy(testPreBool,testLabel[:,:5000])) # accuracy on the first 5000 samples\n\ntestPre,testPreBool = myNN.predict(X=testImage[:,5000:])\nprint('test 5000~10000 accuracy:',myNN.accuracy(testPreBool,testLabel[:,5000:])) # accuracy on the last 5000 samples\n\n\n# ## Checking the algorithm's correctness (for testing)\n# \n# kept in the assignment because some testing was also needed while writing this document; this part can be skipped when reviewing\n\n# In[7]:\n\n\nlist2boolTuple = lambda l:list(zip(range(10), l)) # list to (digit, bool) pairs, for easier inspection of the results\n\nindex = 9000 # 
the selected test image\nprint(\"True label:\")\nprint(list2boolTuple(testLabel[:,index]))\nnowPre,nowPreBool = myNN.predict(X = testImage[:,index].reshape(-1,1))\nnowImage = testImage[:,index]\nnowImage = np.array(nowImage) # make a fresh copy, so the original data is not modified\nnowImage = nowImage.reshape(28,28) # restore the image's original shape\nnowImage *= 255 # restore gray values in the 0~255 range\nnowImage = nowImage.astype(int) # float cannot be converted to an image, so use int\n\nplt.imshow(Image.fromarray(nowImage))\nplt.show()\nprint('-'*30)\nprint(\"Predicted label\")\nprint(list2boolTuple(nowPreBool.T.tolist()[0]))\nprint(list2boolTuple(nowPre.T.tolist()[0]))\n\n\n# ## Testing my own image\n# \n# opens a locally drawn image named \"number.bmp\"\n# \n# then uses the trained network to see which digit this image should be classified as\n\n# In[8]:\n\n\nmyNumber = Image.open('number.bmp') # open the image; note its format: in 256-color mode there is no need to sum the RGB channels\n\nplt.subplot(121)\nplt.imshow(myNumber) # show the image\nplt.title('origin picture')\n\nplt.subplot(122)\nmyNumber = myNumber.resize((28,28),Image.ANTIALIAS) # resize to width 28 and height 28\nplt.imshow(myNumber) # show the resized image\nplt.title('after resize')\nplt.show()\n\n\nmyNumberArr = np.array(myNumber) # get an array from the image\n# print(myNumberArr.shape)\n\n# the data must be normalized before predicting\nmyNumberArr = myNumberArr / 255\nmyNumberArr = myNumberArr.reshape(28*28,1)\nmyPre,myPreBool = myNN.predict(X=myNumberArr)\n\n# print(myPreBool.T)\n# plt.figure()\nprobablity = (softmax(myPre).T.reshape(10,) * 100).tolist()\n# print(probablity)\npreNumber = probablity.index(max(probablity))\nplt.title(str(preNumber)+' : '+str(round(max(probablity),2))+'%')\nplt.bar([i for i in range(10)],probablity,[0.5 for i in range(10)], align = 'center')\nplt.xlabel(\"number\")\nplt.ylabel(\"probability\")\nplt.show()\n\n\n# ## Result analysis\n# \n# ### Hyperparameter analysis\n# \n# the parameters used are: an input layer of 784\n# \n# 3 hidden layers, of 500, 300 and 200 nodes\n# \n# an output layer of 10\n# \n# the lambda coefficient of the L2 regularization is 0.001\n# \n# dropout is 0.9 on the input layer and 0.8 on the hidden layers\n# \n# ### Training set and test set analysis\n# \n# the accuracy on the full test set of 10000 images is `0.9507`\n# the accuracy on the first 5000 images is `0.9316`,\n# and on the last 5000 images it is `0.9698`,\n# which matches the dataset description saying the first 5000 images are harder to recognize\n# \n# the accuracy on the training set is `0.951183333333`\n# \n# the accuracy on both the training and the test set is around 95%; the variance is fairly large, but the bias is small (the costs differ little)\n# \n# the recognition rate still has room for improvement, since it is only about 95% on both the training set and the test set\n# \n# overfitting does not seem to occur\n# \n# \n","sub_path":"handwritten digits recognition/MNIST.py","file_name":"MNIST.py","file_ext":"py","file_size_in_byte":13242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"109007937","text":"\"\"\"\n236. 
Lowest Common Ancestor of a Binary Tree\nMedium\n\nGiven a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.\n\nAccording to the definition of LCA on Wikipedia: “The lowest common ancestor is defined between two nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to be a descendant of itself).”\n\nGiven the following binary tree:  root = [3,5,1,6,2,0,8,null,null,7,4]\n\nExample 1:\n\nInput: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1\nOutput: 3\nExplanation: The LCA of nodes 5 and 1 is 3.\nExample 2:\n\nInput: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4\nOutput: 5\nExplanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant of itself according to the LCA definition.\n\"\"\"\n\nclass TreeNode:\n    def __init__(self, data):\n        self.val = data\n        self.left = None\n        self.right = None\n\n    def __repr__(self):\n        return \"val {} \".format(self.val)\n\nclass BinaryTree:\n    def __init__(self, node):\n        self.root = TreeNode(node)\n\n    def processTree(self,p,q):\n        result = self.lowestAncestor(self.root, p, q)\n        return result\n\n    def lowestAncestor(self, root, p, q):\n        \"\"\"Post order traversal - left->right->root\"\"\"\n        if root is None:\n            return None\n        if root.val == p or root.val == q:\n            return root.val\n        left = self.lowestAncestor(root.left, p, q)\n        right = self.lowestAncestor(root.right, p, q)\n        \n        if left is not None and right is not None:\n            # p and q were found in different subtrees, so this node is the LCA\n            return root.val\n        if left is None and right is not None:\n            return right\n        elif right is None and left is not None:\n            return left\n        return None\n\ntree = BinaryTree(3)\ntree.root.left = TreeNode(5)\ntree.root.right = TreeNode(1)\ntree.root.left.left = TreeNode(6)\ntree.root.left.right = TreeNode(2)\ntree.root.right.left = TreeNode(0)\ntree.root.right.right = TreeNode(8)\n\nprint(tree.processTree(5,1))\n\n\"\"\"\n6 2 5 0 8 1 3\n\"\"\"\n\n\n\n\n\n","sub_path":"fb/lowest_common_ancestor.py","file_name":"lowest_common_ancestor.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"111230380","text":"#Problem 6\nimport copy\n\n# parameter1: Input List\n# • parameter2: Integer, number of celebrities appearing together.\n# • parameter3: helper List for recursion enumerating\n# • parameter4: helper Integer for recursion indexing\n# • Return: Nothing\n# • Print: All possible Combinations\n\ndef generateBillboard(casts, num, L, idx):\n    if num ==len(L):\n        print(L)\n        return\n    elif idx == len(casts):\n        return\n    else:\n        L_copy = copy.deepcopy(L)\n        L_copy.append(casts[idx])\n        generateBillboard(casts, num,L,idx+1)\n        generateBillboard(casts, num,L_copy,idx+1)\n\ncasts = ['Johnny Depp', \"Al Pacino\", \"Robert De Niro\",\"Kevin Spacey\", \"Denzel Washington\", \"Russell Crowe\", \"Brad Pitt\"]\n\ngenerateBillboard(casts, 2, [], 0)\n","sub_path":"Recursion/Combination.py","file_name":"Combination.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"568628958","text":"from rest_framework import viewsets, status\nfrom django_filters import rest_framework as filters\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import action\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.cache import cache_page\nfrom django.views.decorators.vary import vary_on_cookie\nfrom 
django.conf import settings\n\nfrom .models import MyCoin\nfrom .serializers import MyCoinSerializer, ConvertSerializer\nfrom backend.services import Convert\nfrom .filterset import ConvertFilter, MyCoinFilter\n\n\nclass MyCoinViewSet(viewsets.ModelViewSet):\n queryset = MyCoin.objects.all()\n serializer_class = MyCoinSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filter_class = MyCoinFilter\n\n @action(\n detail=False,\n methods=['get'],\n serializer_class=ConvertSerializer,\n filter_class=ConvertFilter\n )\n def convert(self, request):\n from_coin = request.query_params.get('from_coin')\n to_coin = request.query_params.get('to')\n amount = float(request.query_params.get('amount', '1'))\n\n convert = Convert(from_coin=from_coin, to_coin=to_coin, amount=amount)\n # Returns the already converted value\n value_convert = convert.set_convert()\n\n if value_convert > 0:\n convert_obj = Convert(\n from_coin=from_coin, to_coin=to_coin, amount=value_convert\n )\n serializer = self.get_serializer(convert_obj)\n return Response(serializer.data)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n @method_decorator(vary_on_cookie)\n @method_decorator(cache_page(settings.CACHE_TTL))\n def dispatch(self, *args, **kwargs):\n return super(MyCoinViewSet, self).dispatch(*args, **kwargs)\n","sub_path":"backend/core/views_api.py","file_name":"views_api.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"206384087","text":"#!/bin/python\nfrom sys import argv\nimport json\nimport re\n\n#emojiRe = re.compile(u'['\n# u'\\U0001F300-\\U0001F64F'\n# u'\\U0001F680-\\U0001F6FF'\n# u'\\u2600-\\u26FF\\u2700-\\u27BF]+', \n# re.UNICODE)\n\nfile_name = argv[1]\n\n# remove re-tweets\n\ndef removeRetweets():\n\tdata = open(file_name).readlines()\n\toutfile = open(file_name + '_out', 'w')\n\tfor line in data:\n\t\ttweet = json.loads(line)\n\t\tif not re.search('^RT', tweet['text']):\n\t\t\toutfile.write(line)\n\n\n\ndef removeStuff(line):\n\twords = line.lower().split()\n\treturn_string = \"\"\n\tfor word in words:\n\t\tif not (('@' in word) or ('http' in word) or (len(word) <= 2) or ('#' in word)):\n\t\t\treturn_string += word + ' '\n\treturn removeEmoji(return_string)\n\n\n\ndef removeHandlesFromFile(file_name):\n\tfile_ = open(file_name).readlines()\n\tfile_out = open(file_name + '_clean', 'w')\n\tfor line in file_:\n\t\ttweet = json.loads(line)\n\t\ttweet_text = tweet['text']\n\t\ttweet['text'] = removeStuff(tweet_text)\n\t\tfile_out.write(json.dumps(tweet) + '\\n')\n\t\t#print removeStuff(tweet_text)\n\ndef checkWord(word):\n\tfor c in word:\n\t\tif ord(c) >= 127:\n\t\t\treturn False\n\treturn True\n\n# UTF-8 Encoding of some emojis to take into consideration\n# need to map the relevant emoijs into a text contant\n# need to preappend negative words with NOT or something to deal with negating positive words\nemojiMap = {\n\t'TEARS_OF_JOY': '\\xF0\\x9F\\x98\\x81',\n\t'SMILING_FACE1': '\\xF0\\x9F\\x98\\x83',\n\t'SMILING_FACE2': '\\xF0\\x9F\\x98\\x84',\n\t'SMILING_FACE3': '\\xF0\\x9F\\x98\\x85',\n\t'SMILING_EYES': '\\xF0\\x9F\\x98\\x8A'\n\n}\n\n\ndef removeEmoji(line):\n\tl = [ord(c) for c in line]\n\treturn_string = \"\"\n\tfor word in line.split():\n\t\tif checkWord(word):\n\t\t\treturn_string += word + ' '\n\treturn return_string\n\nremoveRetweets()\nremoveHandlesFromFile(file_name + 
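# Editor's note: a hedged client-side sketch of calling the convert action defined above; the host and router prefix ('api/mycoins') are illustrative, since the URL configuration is not shown here.\nimport requests\nresp = requests.get('http://localhost:8000/api/mycoins/convert/',\n                    params={'from_coin': 'BTC', 'to': 'USD', 'amount': 2})\nprint(resp.status_code, resp.json() if resp.ok else None)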
'_out')\n\n","sub_path":"preprocessor/stream-pre-processor.py","file_name":"stream-pre-processor.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"233563802","text":"from time import sleep\nimport threading\n\n# add logging support\nimport logging\nmod_log = logging.getLogger('airpi.LcdScroller')\n\nclass LcdScroller(threading.Thread):\n def __init__(self, lcdpanel, rows, cols, delay, sl, data):\n threading.Thread.__init__(self)\n self.log = logging.getLogger('airpi.LcdScroller')\n self.running = False\n self.lcd = lcdpanel\n self.rows = rows\n self.cols = cols\n self.delay = delay\n self.sl = sl\n self.data = data\n self.bl = 0\n\n def run(self):\n self.running = True\n start = [0] * self.rows\n finish = [self.cols - 1] * self.rows\n try:\n while self.running:\n # self.log.debug(\"Start/Finish: {0} {1} {2}\".format(start, finish, map(len, data)))\n # scroll through the data\n disp_str = u\"\"\n for i in range(self.rows):\n # handle non scrolling data\n if self.sl[i] == 0:\n disp_str = self.data[i]\n else:\n # scroll data\n if finish[i] <= len(self.data[i]):\n disp_str = self.data[i][start[i]:finish[i]]\n else:\n disp_str = self.data[i][start[i]:len(self.data[i])] + self.data[i][:finish[i] - len(self.data[i])]\n start[i] += 1\n finish[i] += 1\n if start[i] == len(self.data[i]):\n start[i] = 0\n finish[i] = self.cols - 1\n\n # self.log.debug(u\"Display string: {0} [{1}] {2}\".format(i + 1, disp_str, self.bl))\n self.lcd.display_string(disp_str, i + 1, bl=self.bl)\n sleep(self.delay)\n except Exception as e:\n self.log.error(\"Error Scrolling lines: {0}\".format(e))\n raise\n\n def stopScroller(self):\n self.running = False\n\n def updData(self, sl, data, bl):\n self.sl = sl\n self.data = data\n self.bl = bl","sub_path":"outputs/LcdScroller.py","file_name":"LcdScroller.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"352046318","text":"import requests\nimport json\n\nclass TvMazeApi:\n baseUrl = \"\"\n\n def __init__(self, tvmazeId):\n if tvmazeId is None:\n return\n self.baseUrl = \"http://api.tvmaze.com/shows/\"+tvmazeId\n \n def getEpisodeUrl(self, season, number):\n if not season or not number:\n return \"\"\n payload = {\"season\": season, \"number\": number}\n response = requests.get(self.baseUrl + \"/episodebynumber\", params=payload)\n if response.status_code != 200:\n raise ValueError(\n 'Request to slack returned an error %s, the response is:\\n%s'\n % (response.status_code, response.text)\n )\n data = dict(json.loads(response.text))\n return data.get(\"url\", \"\")","sub_path":"tvmaze.py","file_name":"tvmaze.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"272080459","text":"import time\n\nfrom cassandra import ConsistencyLevel, Timeout, Unavailable\nfrom cassandra.query import SimpleStatement\n\nfrom assertions import assert_invalid, assert_unavailable\nfrom dtest import Tester, debug\nfrom tools import since\n\n\nclass TestBatch(Tester):\n\n def counter_batch_accepts_counter_mutations_test(self):\n \"\"\" Test that counter batch accepts counter mutations \"\"\"\n session = self.prepare()\n session.execute(\"\"\"\n BEGIN COUNTER BATCH\n UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://foo.com'\n UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 
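# Editor's note: a hedged usage sketch for the TvMazeApi class above; the TVMaze show id '82' is illustrative.\napi = TvMazeApi('82')\nurl = api.getEpisodeUrl(1, 1)   # season 1, episode 1\nprint(url or 'episode not found')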
'http://bar.com'\n UPDATE clicks SET total = total + 1 WHERE userid = 2 and url = 'http://baz.com'\n APPLY BATCH\n \"\"\")\n rows = session.execute(\"SELECT total FROM clicks\")\n assert [list(rows[0]), list(rows[1]), list(rows[2])] == [[1], [1], [1]], rows\n\n def counter_batch_rejects_regular_mutations_test(self):\n \"\"\" Test that counter batch rejects non-counter mutations \"\"\"\n session = self.prepare()\n if self.cluster.version() < '2.1':\n err = \"Only counter mutations are allowed in COUNTER batches\"\n else:\n err = \"Cannot include non-counter statement in a counter batch\"\n\n assert_invalid(session, \"\"\"\n BEGIN COUNTER BATCH\n UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://foo.com'\n UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://bar.com'\n UPDATE clicks SET total = total + 1 WHERE userid = 2 and url = 'http://baz.com'\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n APPLY BATCH\n \"\"\", matching=err)\n\n def logged_batch_accepts_regular_mutations_test(self):\n \"\"\" Test that logged batch accepts regular mutations \"\"\"\n session = self.prepare()\n session.execute(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")\n rows = session.execute(\"SELECT * FROM users\")\n res = sorted(rows)\n assert [list(res[0]), list(res[1])] == [[0, u'Jack', u'Sparrow'], [1, u'Will', u'Turner']], res\n\n @since('3.0')\n def logged_batch_gcgs_below_threshold_single_table_test(self):\n \"\"\" Test that logged batch accepts regular mutations \"\"\"\n session = self.prepare()\n\n # Single table\n session.execute(\"ALTER TABLE users WITH gc_grace_seconds = 0\")\n session.execute(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")\n node1 = self.cluster.nodelist()[0]\n warning = node1.grep_log(\"Executing a LOGGED BATCH on table \\[ks.users\\], configured with a \"\n \"gc_grace_seconds of 0. The gc_grace_seconds is used to TTL \"\n \"batchlog entries, so setting gc_grace_seconds too low on tables \"\n \"involved in an atomic batch might cause batchlog entries to expire \"\n \"before being replayed.\")\n debug(warning)\n self.assertEquals(1, len(warning), \"Cannot find the gc_grace_seconds warning message.\")\n\n @since('3.0')\n def logged_batch_gcgs_below_threshold_multi_table_test(self):\n \"\"\" Test that logged batch accepts regular mutations \"\"\"\n session = self.prepare()\n session.execute(\"ALTER TABLE users WITH gc_grace_seconds = 0\")\n session.execute(\"\"\"\n CREATE TABLE views (\n userid int,\n url text,\n PRIMARY KEY (userid, url)\n ) WITH gc_grace_seconds = 0;\n \"\"\")\n session.execute(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO views (userid, url) VALUES (1, 'Will')\n APPLY BATCH\n \"\"\")\n node1 = self.cluster.nodelist()[0]\n warning = node1.grep_log(\"Executing a LOGGED BATCH on tables \\[ks.views, ks.users\\], configured with a \"\n \"gc_grace_seconds of 0. 
The gc_grace_seconds is used to TTL \"\n                                 \"batchlog entries, so setting gc_grace_seconds too low on tables \"\n                                 \"involved in an atomic batch might cause batchlog entries to expire \"\n                                 \"before being replayed.\")\n        debug(warning)\n        self.assertEquals(1, len(warning), \"Cannot find the gc_grace_seconds warning message.\")\n\n    @since('3.0')\n    def unlogged_batch_gcgs_below_threshold_should_not_print_warning_test(self):\n        \"\"\" Test that an unlogged batch does not print the gc_grace_seconds warning \"\"\"\n        session = self.prepare()\n        session.execute(\"ALTER TABLE users WITH gc_grace_seconds = 0\")\n        session.execute(\"\"\"\n            BEGIN UNLOGGED BATCH\n            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n            INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n            APPLY BATCH\n        \"\"\")\n        node1 = self.cluster.nodelist()[0]\n        warning = node1.grep_log(\"setting a too low gc_grace_seconds on tables involved in an atomic batch\")\n        debug(warning)\n        self.assertEquals(0, len(warning), \"Found the gc_grace_seconds warning message when none was expected.\")\n\n    def logged_batch_rejects_counter_mutations_test(self):\n        \"\"\" Test that logged batch rejects counter mutations \"\"\"\n        session = self.prepare()\n        if self.cluster.version() < '2.1':\n            err = \"Counter mutations are only allowed in COUNTER batches\"\n        else:\n            err = \"Cannot include a counter statement in a logged batch\"\n\n        assert_invalid(session, \"\"\"\n            BEGIN BATCH\n            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n            INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n            UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://foo.com'\n            APPLY BATCH\n        \"\"\", matching=err)\n\n    def unlogged_batch_accepts_regular_mutations_test(self):\n        \"\"\" Test that unlogged batch accepts regular mutations \"\"\"\n        session = self.prepare()\n        session.execute(\"\"\"\n            BEGIN UNLOGGED BATCH\n            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n            INSERT INTO users (id, firstname, lastname) VALUES (2, 'Elizabeth', 'Swann')\n            APPLY BATCH\n        \"\"\")\n        rows = session.execute(\"SELECT * FROM users\")\n        res = sorted(rows)\n        assert [list(res[0]), list(res[1])] == [[0, u'Jack', u'Sparrow'], [2, u'Elizabeth', u'Swann']], res\n\n    def unlogged_batch_rejects_counter_mutations_test(self):\n        \"\"\" Test that unlogged batch rejects counter mutations \"\"\"\n        session = self.prepare()\n        if self.cluster.version() < '2.1':\n            err = \"Counter mutations are only allowed in COUNTER batches\"\n        else:\n            err = \"Counter and non-counter mutations cannot exist in the same batch\"\n\n        assert_invalid(session, \"\"\"\n            BEGIN UNLOGGED BATCH\n            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n            INSERT INTO users (id, firstname, lastname) VALUES (2, 'Elizabeth', 'Swann')\n            UPDATE clicks SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'\n            APPLY BATCH\n        \"\"\", matching=err)\n\n    def logged_batch_throws_uae_test(self):\n        \"\"\" Test that logged batch throws UAE if there aren't enough live nodes \"\"\"\n        session = self.prepare(nodes=3)\n        for node in self.cluster.nodelist()[1:]:\n            node.stop(wait_other_notice=True)\n        session.default_consistency_level = ConsistencyLevel.ONE\n        assert_unavailable(session.execute, \"\"\"\n            BEGIN BATCH\n            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n            INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n            APPLY BATCH\n        \"\"\")\n\n    def logged_batch_doesnt_throw_uae_test(self):\n        \"\"\" Test that logged batch DOES NOT throw UAE if there are 
at least 2 live nodes \"\"\"\n session = self.prepare(nodes=3)\n self.cluster.nodelist()[-1].stop(wait_other_notice=True)\n query = SimpleStatement(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", consistency_level=ConsistencyLevel.ANY)\n session.execute(query)\n assert True\n\n def acknowledged_by_batchlog_not_set_when_batchlog_write_fails_test(self):\n \"\"\" Test that acknowledged_by_batchlog is False if batchlog can't be written \"\"\"\n session = self.prepare(nodes=3, compression=False)\n # kill 2 of the 3 nodes (all the batchlog write candidates).\n [node.stop(gently=False) for node in self.cluster.nodelist()[1:]]\n self.assert_timedout(session, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.ONE, received_responses=0)\n\n def acknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):\n \"\"\" Test that acknowledged_by_batchlog is True if batchlog can be written \"\"\"\n session = self.prepare(nodes=3, compression=False)\n # kill one of the nodes so that batchlog will be written, but the write will fail.\n self.cluster.nodelist()[-1].stop(gently=False)\n self.assert_timedout(session, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.THREE, received_responses=2)\n\n def batch_uses_proper_timestamp_test(self):\n \"\"\" Test that each statement will be executed with provided BATCH timestamp \"\"\"\n session = self.prepare()\n session.execute(\"\"\"\n BEGIN BATCH USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")\n rows = session.execute(\"SELECT id, writetime(firstname), writetime(lastname) FROM users\")\n res = sorted(rows)\n assert [list(res[0]), list(res[1])] == [[0, 1111111111111111, 1111111111111111], [1, 1111111111111111, 1111111111111111]], res\n\n def only_one_timestamp_is_valid_test(self):\n \"\"\" Test that TIMESTAMP must not be used in the statements within the batch. 
\"\"\"\n session = self.prepare()\n assert_invalid(session, \"\"\"\n BEGIN BATCH USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow') USING TIMESTAMP 2\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", matching=\"Timestamp must be set either on BATCH or individual statements\")\n\n def each_statement_in_batch_uses_proper_timestamp_test(self):\n \"\"\" Test that each statement will be executed with its own timestamp \"\"\"\n session = self.prepare()\n session.execute(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow') USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner') USING TIMESTAMP 1111111111111112\n APPLY BATCH\n \"\"\")\n rows = session.execute(\"SELECT id, writetime(firstname), writetime(lastname) FROM users\")\n res = sorted(rows)\n assert [list(res[0]), list(res[1])] == [[0, 1111111111111111, 1111111111111111], [1, 1111111111111112, 1111111111111112]], res\n\n def assert_timedout(self, session, query, cl, acknowledged_by=None,\n received_responses=None):\n try:\n statement = SimpleStatement(query, consistency_level=cl)\n session.execute(statement, timeout=None)\n except Timeout as e:\n if received_responses is not None:\n msg = \"Expecting received_responses to be {}, got: {}\".format(\n received_responses, e.received_responses,)\n self.assertEqual(e.received_responses, received_responses, msg)\n except Unavailable as e:\n if received_responses is not None:\n msg = \"Expecting alive_replicas to be {}, got: {}\".format(\n received_responses, e.alive_replicas,)\n self.assertEqual(e.alive_replicas, received_responses, msg)\n except Exception as e:\n assert False, \"Expecting TimedOutException, got:\" + str(e)\n else:\n assert False, \"Expecting TimedOutException but no exception was raised\"\n\n def prepare(self, nodes=1, compression=True):\n if not self.cluster.nodelist():\n self.cluster.populate(nodes).start(wait_other_notice=True)\n\n node1 = self.cluster.nodelist()[0]\n session = self.patient_cql_connection(node1)\n self.create_ks(session, 'ks', nodes)\n session.execute(\"\"\"\n CREATE TABLE clicks (\n userid int,\n url text,\n total counter,\n PRIMARY KEY (userid, url)\n );\n \"\"\")\n session.execute(\"\"\"\n CREATE TABLE users (\n id int,\n firstname text,\n lastname text,\n PRIMARY KEY (id)\n );\n \"\"\")\n time.sleep(.5)\n return session\n","sub_path":"batch_test.py","file_name":"batch_test.py","file_ext":"py","file_size_in_byte":14282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"251016952","text":"from django import forms\nfrom django.forms import ModelForm\nfrom ordenamiento.models import Parroquia, Barrio\n\n\nclass ParroquiaForm(ModelForm): \n class Meta:\n model = Parroquia\n fields = ['nombre', 'tipo_parroquia'] \n\nclass BarrioForm(ModelForm):\n class Meta:\n model = Barrio\n fields = ['nombre','viviendas','parques', 'edificios', 'parroquia']\n\n\n\nclass BarrioParroquiaForm(ModelForm): \n \n def __init__(self, nombre, *args, **kwargs): \n super(BarrioParroquiaForm, self).__init__(*args, **kwargs) \n self.initial['parroquia'] = nombre\n self.fields[\"parroquia\"].widget = forms.widgets.HiddenInput()\n print(nombre)\n \n class Meta:\n model = Barrio\n fields = ['nombre','viviendas','parques', 'edificios', 
'parroquia']","sub_path":"proyectociudad/ordenamiento/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"639329740","text":"#!/usr/bin/env python\n\n\n# This document is part of Acronym\n# https://github.com/geowurster/Acronym\n\n\n# =================================================================================== #\n#\n# New BSD License\n#\n# Copyright (c) 2014, Kevin D. Wurster\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * The names of its contributors may not be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# =================================================================================== #\n\n\n\"\"\"Generate a tile index\"\"\"\n\n\nfrom __future__ import unicode_literals\n\nimport inspect\nimport os\nfrom os.path import *\n\nfrom ..components import *\nfrom ... import lidar\nfrom ... 
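# Editor's note: a hedged sketch of how BarrioParroquiaForm above might be wired into a view - the first positional argument pre-fills the hidden parroquia field. The view, template and URL names are illustrative.\nfrom django.shortcuts import render, redirect\ndef crear_barrio(request, parroquia_id):\n    form = BarrioParroquiaForm(parroquia_id, request.POST or None)\n    if form.is_valid():\n        form.save()\n        return redirect('barrio_list')\n    return render(request, 'barrio_form.html', {'form': form})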
import settings\n\ntry:\n from osgeo import gdal\n from osgeo import ogr\n from osgeo import osr\nexcept ImportError:\n import gdal\n import ogr\n import osr\nogr.UseExceptions()\nosr.UseExceptions()\n\n\n#/* ======================================================================= */#\n#/* Document level information\n#/* ======================================================================= */#\n\n__all__ = ['print_help', 'tileindex']\nSUBCOMMAND_NAME = basename(inspect.getfile(inspect.currentframe())).rsplit('.')[0].replace('subcommand_', '')\n\n\n#/* ======================================================================= */#\n#/* Define print_help() function\n#/* ======================================================================= */#\n\ndef print_help():\n\n \"\"\"\n Detailed help information\n\n Returns\n -------\n 1 for exit code purposes\n \"\"\"\n\n global SUBCOMMAND_NAME\n\n # TODO: Populate help\n vprint(\"\"\"\nHelp: {0}\n------{1}\n{2}\n \"\"\".format(SUBCOMMAND_NAME, '-' * len(SUBCOMMAND_NAME), tileindex.__doc__))\n\n\n#/* ======================================================================= */#\n#/* Define print_help() function\n#/* ======================================================================= */#\n\ndef print_usage():\n\n \"\"\"\n Commandline usage information\n\n Returns\n -------\n 1 for exit code purposes\n \"\"\"\n\n global SUBCOMMAND_NAME\n\n # TODO: Populate help\n vprint(\"\"\"\n{0} [-of driver] [-append] [-overwrite] [-s-srs srs_def] [-t-srs srs_def]\n{1} [-dsco NAME=VAL[,N=V]] [-lco NAME=VAL[,N=V]] [-nln name] [-nlt type]\n{1} [-nf name] [-pf name] -o outfile infile [infile ...]\n \"\"\".format(SUBCOMMAND_NAME, ' ' * len(SUBCOMMAND_NAME), tileindex.__doc__))\n\n return 1\n\n\n#/* ======================================================================= */#\n#/* Define print_long_usage() function\n#/* ======================================================================= */#\n\ndef print_long_usage():\n\n \"\"\"\n Detailed commandline usage\n\n Returns\n -------\n 1 for exit code purposes\n \"\"\"\n\n print_usage()\n vprint(\"\"\"Options:\n -of -output-format OGR supported driver\n [default: {0}]\n -s-srs -source-srs All input files are assumed to be in the specified\n SRS - if unspecified, SRS is automatically detected\n -t-srs -target-srs SRS to use for output layer - if unset, it is\n automatically detected form the first input file, and\n if source SRS is different from target, coordinates\n are automatically transformed\n -nln -new-layer-name Output layer name - if using -append and the specified\n layer name exists in the output datasource, the layer\n is appended to - otherwise it is created\n [default: name of output datasource]\n -nlt -new-layer-type Set to output layer type to one of the listed options\n or, if using -append, set to the target layer's type\n Polygon, Polygon25D, MultiPolygon, MultiPolygon25D\n [default: Polygon]\n -nf -name-field Field in the output tile index in which the input\n filename is placed\n [default: name]\n -pf -path-field Field in the output tile index in which the absolute\n path to the input filename is placed\n [default: path]\n -append Append layer to output file\n -overwrite Overwrite output file\n -dsco -ds-creation-options Datasource creation options for output driver\n Ignored if using -append\n -lco -lyr-creation-options Layer creation options for output driver\n Ignored if using -append\n \"\"\".format(settings.DEFAULT_VECTOR_DRIVER))\n\n return 1\n\n\n#/* 
======================================================================= */#\n#/* Define get_lidar_extent() function\n#/* ======================================================================= */#\n\ndef get_lidar_extent(i_file):\n\n tile = lidar.io.Read(i_file, 'las13').header\n ring = ogr.Geometry(ogr.wkbLinearRing)\n ring.AddPoint(tile.min_x, tile.min_y)\n ring.AddPoint(tile.min_x, tile.max_y)\n ring.AddPoint(tile.max_x, tile.max_y)\n ring.AddPoint(tile.max_x, tile.min_y)\n ring.CloseRings()\n\n return ring\n\n\n#/* ======================================================================= */#\n#/* Define get_raster_extent() function\n#/* ======================================================================= */#\n\ndef get_raster_extent(i_file):\n\n \"\"\"\n geotransform[0] = top left x\n geotransform[1] = w-e pixel resolution\n geotransform[2] = rotation, 0 if image is \"north up\"\n geotransform[3] = top left y\n geotransform[4] = rotation, 0 if image is \"north up\"\n geotransform[5] = n-s pixel resolution\n \"\"\"\n\n ds = gdal.Open(i_file)\n n_cols = ds.RasterXSize\n n_rows = ds.RasterYSize\n gt = ds.GetGeoTransform()\n\n min_x = gt[0]\n min_y = gt[3] + n_cols * gt[4] + n_rows * gt[5]\n max_x = gt[0] + n_cols * gt[1] + n_rows * gt[2]\n max_y = gt[3]\n\n ring = ogr.Geometry(ogr.wkbLinearRing)\n ring.AddPoint(min_x, min_y)\n ring.AddPoint(min_x, max_y)\n ring.AddPoint(max_x, max_y)\n ring.AddPoint(max_x, min_y)\n ring.CloseRings()\n\n return ring\n\n\n#/* ======================================================================= */#\n#/* Define get_vector_extent() function\n#/* ======================================================================= */#\n\ndef get_vector_extent(i_layer):\n\n min_x, max_x, min_y, max_y = i_layer.GetExtent()\n ring = ogr.Geometry(ogr.wkbLinearRing)\n ring.AddPoint(min_x, min_y)\n ring.AddPoint(min_x, max_y)\n ring.AddPoint(max_x, max_y)\n ring.AddPoint(max_x, min_y)\n ring.CloseRings()\n\n return ring\n\n\n#/* ======================================================================= */#\n#/* Define main() function\n#/* ======================================================================= */#\n\ndef tileindex(datatype, args):\n\n \"\"\"\n Generate a tile index - can handle raster, vector, and LiDAR\n \"\"\"\n\n #/* ----------------------------------------------------------------------- */#\n #/* Print usage and check datatype\n #/* ----------------------------------------------------------------------- */#\n\n datatype = datatype.lower()\n datatype_options = ('raster', 'vector', 'lidar')\n if datatype not in datatype_options:\n raise ValueError(\"Invalid datatype '%s' - options are: %s\" % (datatype, datatype_options))\n\n if len(args) is 0:\n return print_usage()\n\n #/* ----------------------------------------------------------------------- */#\n #/* Defaults\n #/* ----------------------------------------------------------------------- */#\n\n output_driver_name = settings.DEFAULT_VECTOR_DRIVER\n output_lco = []\n output_dsco = []\n overwrite_mode = False\n append_mode = False\n new_layer_name = None\n new_layer_type = 'polygon'\n name_field = 'name'\n path_field = 'path'\n\n #/* ----------------------------------------------------------------------- */#\n #/* Containers\n #/* ----------------------------------------------------------------------- */#\n\n files_to_index = []\n output_file = None\n source_srs = None\n target_srs = None\n\n #/* ----------------------------------------------------------------------- */#\n #/* Parse arguments\n #/* 
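# Editor's note: a short worked example of the geotransform arithmetic used in get_raster_extent(), with made-up numbers.\n# For gt = (100.0, 10.0, 0.0, 500.0, 0.0, -10.0) and a 20x10 (cols x rows) north-up raster:\n#   min_x = gt[0]                          = 100.0\n#   max_y = gt[3]                          = 500.0\n#   max_x = gt[0] + 20*gt[1] + 10*gt[2]    = 300.0\n#   min_y = gt[3] + 20*gt[4] + 10*gt[5]    = 400.0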
----------------------------------------------------------------------- */#\n\n i = 0\n arg = None\n arg_error = False\n while i < len(args):\n\n try:\n arg = args[i]\n\n # Help arguments\n if arg in ('--help-info', '-help-info', '--helpinfo', '-help-info', '-h', '--h'):\n return print_help_info()\n elif arg in ('--help', '-help', '-h'):\n return print_help()\n elif arg in ('--usage', '-usage', '-u'):\n return print_usage()\n elif arg in ('--long-usage', '-long-usage', '-lu'):\n return print_long_usage()\n elif arg in ('--version', '-version'):\n return print_version()\n elif arg in ('--short-version', '-short-version', '-sv'):\n return print_short_version()\n elif arg in ('--license', '-license'):\n return print_license()\n\n # I/O\n elif arg in ('-o', '-output', '--output'):\n i += 2\n output_file = abspath(expanduser(args[i - 1]))\n elif arg in ('-append', '--append'):\n i += 1\n append_mode = True\n elif arg in ('-overwrite', '--overwrite'):\n i += 1\n overwrite_mode = True\n\n # Output fields\n elif arg in ('-nf', '-name-field'):\n i += 2\n name_field = args[i - 1]\n elif arg in ('-pf', '-path-field'):\n i += 2\n path_field = args[i - 1]\n\n # OGR Options\n elif arg in ('-of', '-output-format', '--output-format'):\n i += 2\n output_driver_name = args[i - 1]\n elif arg in ('-lco', '-lyr-creation-options', '--lyr-creation-options', '-layer-creation-options', '--layer-creation-options'):\n i += 1\n while args[i][0] != '-' and i < len(args):\n output_lco += args[i].split(',')\n i += 1\n elif arg in ('-dsco', '-ds-creation-options', '--ds-creation-options', '-datasource-creation-options', '--datasource-creation-options'):\n i += 1\n while args[i][0] != '-' and i < len(args):\n output_dsco += args[i].split(',')\n i += 1\n elif arg in ('-nln', '-new-layer-name', '--new-layer-name'):\n i += 2\n new_layer_name = args[i - 1]\n elif arg in ('-nlt', '-new-layer-type', '--new-layer-type'):\n i += 2\n new_layer_type = args[i - 1]\n\n # Spatial reference\n elif arg in ('-a-srs', '-assign-srs', '--assign-srs', '-a_srs'):\n i += 2\n target_srs = args[i - 1] # A later comparison determines if a transform must happen\n elif arg in ('-s-srs', '-source-srs', '--source-srs', '-s_srs'):\n i += 2\n source_srs = args[i - 1]\n elif arg in ('-t-srs', '-target-srs', '--target-srs', '-t_srs'):\n i += 2\n target_srs = args[i - 1]\n\n # Files to index\n else:\n\n i += 1\n\n if os.access(arg, os.R_OK):\n files_to_index.append(abspath(expanduser(arg)))\n else:\n arg_error = True\n vprint(\"ERROR: Can't find input file: %s\" % arg)\n\n # This catches several conditions:\n # 1. The last argument is a flag that requires parameters but the user did not supply the parameter\n # 2. The arg parser did not properly consume all parameters for an argument\n # 3. The arg parser did not properly iterate the 'i' variable\n # 4. An argument split on '=' doesn't have anything after '=' - e.g. 
'--output-file='\n except (IndexError, ValueError):\n i += 1\n arg_error = True\n vprint(\"ERROR: An argument has invalid parameters: %s\" % arg)\n\n #/* ----------------------------------------------------------------------- */#\n #/* Validate parameters\n #/* ----------------------------------------------------------------------- */#\n\n bail = False\n\n # Check arguments\n if arg_error:\n bail = True\n vprint(\"ERROR: Did not successfully parse arguments\")\n\n # Check input files - read mode is tested when arguments are parsed\n if not files_to_index:\n bail = True\n vprint(\"ERROR: Need input files\")\n\n # Check output file\n if not output_file:\n bail = True\n vprint(\"ERROR: Need an output file\")\n\n # Check write modes\n if overwrite_mode and append_mode:\n bail = True\n vprint(\"ERROR: Update and overwrite cannot be used simultaneously\")\n\n # Check new layer type\n if new_layer_type.lower() == 'polygon':\n new_layer_type = ogr.wkbPolygon\n elif new_layer_type.lower() == 'polygon25d':\n new_layer_type = ogr.wkbPolygon25D\n else:\n bail = True\n vprint(\"ERROR: Invalid new layer type: %s\" % new_layer_type)\n\n # Exit if something did not pass validation\n if bail:\n return 1\n\n #/* ----------------------------------------------------------------------- */#\n #/* Prep OGR objects\n #/* ----------------------------------------------------------------------- */#\n\n vprint(\"Prepping data ...\")\n\n if not new_layer_name:\n new_layer_name = output_file.rsplit('.')[0].replace('.', '_')\n\n # TODO: Streamline\n # TODO: Fix overwrite vs. append - append should ONLY append to an existing layer, NOT completely overwrite\n\n # Overwrite an existing file\n # OGR only accepts strings, not unicode\n output_driver_name = str(output_driver_name)\n output_file = str(output_file)\n new_layer_name = str(new_layer_name)\n if overwrite_mode:\n output_drv = ogr.GetDriverByName(output_driver_name)\n try:\n output_drv.DeleteDatasource(output_file)\n except RuntimeError:\n pass\n # File didn't exist - create a new datasource\n output_ds = output_drv.CreateDataSource(output_file.rsplit('.')[0].replace('.', '_'))\n\n # Update an existing datasource\n elif append_mode:\n output_ds = ogr.Open(output_file, 1)\n # Output datasource exists and was opened\n if output_ds:\n if new_layer_name in [output_ds.GetLayer(_i).GetName() for _i in output_ds.GetLayerCount()]:\n output_ds.DeleteLayer(new_layer_name)\n\n # Output datasource doesn't exist - create a new datasource\n else:\n output_drv = ogr.GetDriverByName(output_driver_name)\n output_ds = output_drv.CreateDataSource(output_file)\n\n # Creating a new datasource\n else:\n output_drv = ogr.GetDriverByName(str(output_driver_name))\n output_ds = output_drv.CreateDataSource(output_file)\n\n # Create the output layer\n if datatype == 'lidar' and target_srs is None:\n output_ds = None\n output_drv = None\n vprint(\"ERROR: LiDAR SRS detection is not currently implemented - must explicitly set via -t-srs\")\n return 1\n elif datatype == 'raster' and target_srs is None:\n _srs_ds = gdal.Open(files_to_index[0])\n t_srs = osr.SpatialReference()\n t_srs.ImportFromWkt(_srs_ds.GetProjection())\n _srs_ds = None\n elif datatype == 'vector' and target_srs is None:\n _srs_ds = ogr.Open(files_to_index[0])\n _srs_layer = _srs_ds.GetLayer(0)\n t_srs = _srs_layer.GetSpatialRef()\n _srs_layer = None\n _srs_ds = None\n else:\n t_srs = osr.SpatialReference()\n t_srs.SetFromUserInput(target_srs)\n output_layer = output_ds.CreateLayer(new_layer_name, t_srs, new_layer_type)\n\n # 
Create output fields\n # OGR only accepts strings, not unicode\n name_field = str(name_field)\n path_field = str(path_field)\n field = ogr.FieldDefn(name_field, ogr.OFTString)\n field.SetWidth(254)\n output_layer.CreateField(field)\n field = ogr.FieldDefn(path_field, ogr.OFTString)\n field.SetWidth(254)\n output_layer.CreateField(field)\n\n #/* ----------------------------------------------------------------------- */#\n #/* Process data\n #/* ----------------------------------------------------------------------- */#\n\n vprint(\"Processing ...\")\n\n progress_i = 0\n progress_total = len(files_to_index)\n for i_file in files_to_index:\n\n # Update user\n progress_i += 1\n vprint(\"\\r\\x1b[K\" + \" %s/%s\" % (progress_i, progress_total), flush=True)\n\n # Processing LiDAR\n if datatype == 'lidar':\n\n feature = ogr.Feature(output_layer.GetLayerDefn())\n feature.SetField(name_field, basename(i_file))\n feature.SetField(path_field, i_file)\n\n ring = get_lidar_extent(i_file)\n polygon = ogr.Geometry(output_layer.GetGeomType())\n polygon.AddGeometry(ring)\n\n feature.SetGeometry(polygon)\n output_layer.CreateFeature(feature)\n\n # Processing a raster\n elif datatype == 'raster':\n\n feature = ogr.Feature(output_layer.GetLayerDefn())\n feature.SetField(name_field, basename(i_file))\n feature.SetField(path_field, i_file)\n\n ring = get_raster_extent(i_file)\n polygon = ogr.Geometry(output_layer.GetGeomType())\n polygon.AddGeometry(ring)\n\n feature.SetGeometry(polygon)\n output_layer.CreateFeature(feature)\n\n # Processing a vector\n elif datatype == 'vector':\n\n v_ds = ogr.Open(i_file)\n l_count = v_ds.GetLayerCount()\n\n for l_idx in range(l_count):\n\n v_layer = v_ds.GetLayer(l_idx)\n feature = ogr.Feature(output_layer.GetLayerDefn())\n _name = basename(i_file)\n if l_count > 1:\n _name += ':' + v_layer.GetName()\n\n feature.SetField(name_field, _name)\n feature.SetField(path_field, i_file)\n\n ring = get_vector_extent(v_ds.GetLayer(l_idx))\n polygon = ogr.Geometry(output_layer.GetGeomType())\n polygon.AddGeometry(ring)\n\n feature.SetGeometry(polygon)\n output_layer.CreateFeature(feature)\n\n # Error\n else:\n raise ValueError(\"Datatype issue: %s\" % datatype)\n\n # Cleanup\n v_ds = None\n v_layer = None\n feature = None\n polygon = None\n ring = None\n feature = None\n tile = None\n\n vprint(\" - Done\")\n\n #/* ----------------------------------------------------------------------- */#\n #/* Cleanup and return\n #/* ----------------------------------------------------------------------- */#\n\n field = None\n output_drv = None\n output_ds = None\n output_layer = None\n\n return 0\n","sub_path":"acronym/cmdl/_portable_algorithms/tileindex.py","file_name":"tileindex.py","file_ext":"py","file_size_in_byte":20672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"411749194","text":"import re\nimport string\nfrom collections import defaultdict\n\nwith open('input.txt', 'r') as f:\n instructions = [x.groups() for x in re.finditer(\n r'^.*([A-Z])+.*([A-Z])+.*$', f.read(), re.MULTILINE)]\n\n\ngraph = defaultdict(list)\nrestrictions = defaultdict(list)\n\nfor instruction in instructions:\n graph[instruction[0]].append(instruction[1])\n graph[instruction[0]].sort(reverse=True)\n restrictions[instruction[1]].append(instruction[0])\n\n\nnon_restricted = [letter for letter in string.ascii_uppercase if len(\n restrictions[letter]) == 0]\n\n\ndef is_step_ready(step, visited):\n for s in restrictions[step]:\n if s not in visited:\n return False\n 
return True\n\n\ndef dfs(graph, vertex):\n stack = vertex\n stack.sort(reverse=True)\n path = []\n\n visited = defaultdict(bool)\n\n while stack:\n v = stack.pop()\n if v not in visited and is_step_ready(v, visited):\n visited[v] = True\n path.append(v)\n stack.extend(graph[v])\n stack.sort(reverse=True)\n\n return ''.join(path)\n\n\nfirst_part = dfs(graph, non_restricted)\nprint('First part: {0}'.format(first_part))\n","sub_path":"day_7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"599235925","text":"import random\nimport time\n\ndef würfeln(anzahl):\n alleWürfel = []\n for x in range(anzahl):\n alleWürfel.append(list(x for x in range(1,7)))\n werte = []\n ergebnis = 0\n for würfel in alleWürfel:\n werte.append(würfel[random.randint(0,5)])\n for x in werte:\n ergebnis += x\n\n return werte\n\nif __name__ == \"__main__\":\n anzahl = 1000\n liste = []\n v = 0\n while True:\n v2 = 0\n while würfeln(3) != [1,1,1]:\n v2 += 1\n v += 1\n liste.append(v2)\n if v == anzahl:\n print(liste)\n gesamt = 0\n for zahl in liste:\n gesamt += zahl\n gesamt /= anzahl\n print(\"{0:0.1f}\".format(gesamt))\n break","sub_path":"python/Dateien/python034.py","file_name":"python034.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"126500263","text":"\"\"\"\nAuthor: Jing (https://github.com/gnijuohz)\n\nIntersection of Two Linked Lists: https://oj.leetcode.com/problems/intersection-of-two-linked-lists \n\nWrite a program to find the node at which the intersection of two singly linked lists begins.\n\nFor example, the following two linked lists: \n\nA: a1 → a2\n ↘\n c1 → c2 → c3\n ↗ \nB: b1 → b2 → b3\n\nbegin to intersect at node c1.\n\nNotes:\n\nIf the two linked lists have no intersection at all, return null.\nThe linked lists must retain their original structure after the function returns. \nYou may assume there are no cycles anywhere in the entire linked structure.\nYour code should preferably run in O(n) time and use only O(1) memory.\n\n\n\nCredits:Special thanks to @stellari for adding this problem and creating all test cases. 
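# Editor's note: the trial-division loop in prime_generator.py above re-tests every divisor and is roughly O(n^2); a hedged sketch of the Sieve of Eratosthenes, which yields the same list much faster:\ndef sieve(limit):\n    is_prime = [True] * (limit + 1)\n    is_prime[0] = is_prime[1] = False\n    for n in range(2, int(limit ** 0.5) + 1):\n        if is_prime[n]:\n            for multiple in range(n * n, limit + 1, n):\n                is_prime[multiple] = False\n    return [n for n, flag in enumerate(is_prime) if flag]\nassert sieve(100)[:4] == [2, 3, 5, 7]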
\nTags\nLinked List \n\"\"\"\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param two ListNodes\n # @return the intersected ListNode\n def getIntersectionNode(self, headA, headB):\n if not headA or not headB:\n return\n head_A = headA\n set_A = set([])\n while head_A:\n set_A.add(head_A)\n head_A = head_A.next\n head_B = headB\n while head_B:\n if head_B in set_A:\n return head_B\n head_B = head_B.next\n return\n ","sub_path":"solutions/Intersection-of-Two-Linked-Lists.py","file_name":"Intersection-of-Two-Linked-Lists.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"335305610","text":"import os, sys, ROOT\nimport os.path\nimport numpy as np\n\ninput_filename = \"FEB15_Pos.txt\"\nnumber_of_variables_on_the_same_line = 3 # pt, mt, met\n\nwith open(input_filename, \"r\") as ins:\n array = []\n line_temp = ''\n var_counter = 0\n for line in ins:\n line = line.replace('\\n','').replace('\\t',' ')\n if(not (line.startswith(\"#\") or line.startswith(\"=\")) ):\n var_counter = var_counter+1\n if not line_temp == '':\n line_temp = str(line_temp)+\" \"+(line) # adjust spaces between variables on the same line\n else:\n line_temp = line\n if(line.startswith(\"#\") or line.startswith(\"=\")):\n array.append(line)\n elif(var_counter == number_of_variables_on_the_same_line):\n array.append(line_temp)\n var_counter = 0\n line_temp = ''\n \n\n\noutput_filename = open(os.path.splitext(input_filename)[0]+\"_sorted.txt\", \"w\")\nfor line in array:\n output_filename.write(line+\"\\n\")\noutput_filename.close()\n","sub_path":"CMGTools/WMass/analysis/utils/adjust_table.py","file_name":"adjust_table.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"273371347","text":"'''\nComparison of Continuous No-Regret Algorithms for the 2nd NIPS paper\n\n@author: Maximilian Balandat\n@date: May 27, 2015\n'''\n\n# Set up infrastructure and basic problem parameters\nimport matplotlib as mpl\nmpl.use('Agg') # this is needed when running on a linux server over terminal\nimport multiprocessing as mp\nimport numpy as np\nimport datetime, os\nfrom ContNoRegret.Domains import unitbox\nfrom ContNoRegret.LossFunctions import AffineLossFunction\nfrom ContNoRegret.NoRegretAlgos import ContNoRegretProblem\nfrom ContNoRegret.utils import CNR_worker, plot_results, save_results, circular_tour\nfrom ContNoRegret.animate import save_animations\nfrom ContNoRegret.Potentials import ExponentialPotential, pNormPotential, ExpPPotential, pExpPotential\n\n# this is the location of the folder for the results\nresults_path = '/Users/balandat/Documents/Code/Continuous_No-Regret/results/'\ndesc = 'NIPS2_CNR_greedyfail'\ntmpfolder = '/Volumes/tmp/' # if possible, choose this to be a RamDisk\n\n# some flags for keeping a record of the simulation parameters\nsave_res = True\nshow_plots = False\nsave_anims = False\nshow_anims = False\n\nT = 200 # Time horizon\nL = 5.0 # Uniform bound on the Lipschitz constant\nN = 2500 # Number of parallel algorithm instances\nNgrid = 500000 # Number of gridpoints for the sampling step\ndom = unitbox(2)\n\n# before running the computation, read this file so we can later save a copy in the results folder\nwith open(__file__, 'r') as f:\n thisfile = f.read()\n\n# create a sequence of losses that is really baaaad for 
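# Editor's note: the set-based solution above uses O(n) extra memory, while the problem statement prefers O(1). A hedged sketch of the classic two-pointer variant: each pointer walks its own list and then switches to the other, so both travel len(A)+len(B) steps and meet at the intersection node (or at None).\ndef getIntersectionNode_constant_space(headA, headB):\n    a, b = headA, headB\n    while a is not b:\n        a = a.next if a else headB\n        b = b.next if b else headA\n    return a   # intersection node, or None when the lists are disjoint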
greedy\nlossfuncs = [AffineLossFunction(dom, [L/2, 0], 0.25*L)] + [AffineLossFunction(dom, [(-1)**t*L, 0], 0.5*L) for t in np.arange(1,T)]\nM = L\nM4 = np.max([lossfunc.norm(4, tmpfolder=tmpfolder) for lossfunc in lossfuncs])\nM83 = np.max([lossfunc.norm(8/3, tmpfolder=tmpfolder) for lossfunc in lossfuncs])\n\nprob = ContNoRegretProblem(dom, lossfuncs, L, M, desc=desc)\n\n# the following runs fine if the script is the __main__ method, but crashes when running from ipython\npool = mp.Pool(processes=mp.cpu_count()-1)\nprocesses = []\n\nprocesses.append(pool.apply_async(CNR_worker, (prob,N,'Greedy'), {'Ngrid':Ngrid, 'pid':len(processes), \n 'tmpfolder':tmpfolder, 'label':'Greedy'}))\n\npotentials = [ExponentialPotential(), pNormPotential(1.001), pNormPotential(1.01), pNormPotential(1.05), pNormPotential(1.75, M=M83)]\n# pExpPotential(1.5, M=M4), pNormPotential(1.75, M=M83)]\n\nfor pot in potentials:\n processes.append(pool.apply_async(CNR_worker, (prob,N,'DA'), {'opt_rate':True, 'Ngrid':Ngrid,\n\t\t\t\t 'potential':pot, 'pid':len(processes), 'tmpfolder':tmpfolder, 'label':pot.desc, \n 'results_path':results_path, 'KL':[]})) \n\n# wait for the processes to finish an collect the results\nresults = [process.get() for process in processes]\n\n# plot results and/or save a persistent copy (pickled) of the detailed results\ntimenow = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')# create a time stamp for unambiguously naming the results folder\nresults_directory = '{}{}/'.format(results_path, timenow)\n \nif save_res: \n os.makedirs(results_directory, exist_ok=True) # this could probably use a safer implementation\n# plot_results(results, 100, results_directory, show_plots)\n if save_anims:\n save_animations(results, 10, results_directory, show_anims) \n save_results(results, results_directory) \n # store the previously read-in contents of this file in the results folder\n with open(results_directory+str(__file__), 'w') as f:\n f.write(thisfile)\nelse:\n plot_results(results, offset=100)\n\n","sub_path":"NIPS2_CNR_greedyfail.py","file_name":"NIPS2_CNR_greedyfail.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"585464887","text":"from zope.interface import registry\n\n\nfrom pyramid import renderers\nfrom pyramid import interfaces as pyramid_interfaces\n\n\n_DEFAULT_SERIALIZERS = (('application/json', renderers.JSON()), )\n_MARKER = object()\n\n\nclass Newtonian(object):\n def __init__(self, serializers=_DEFAULT_SERIALIZERS):\n self.components = registry.Components()\n self.content_types = []\n\n for content_type, serializer in serializers:\n self.add_serializer(content_type, serializer)\n\n def add_serializer(self, content_type, serializer):\n self.content_types.append(content_type)\n self.components.registerAdapter(serializer, (content_type, ),\n pyramid_interfaces.IRenderer)\n\n def get_serializer(self, content_type):\n adapters = self.components.adapters\n result = adapters.lookup(content_type,\n pyramid_interfaces.IRenderer,\n default=_MARKER)\n if result is _MARKER:\n msg = 'No renderer for content-type: %s' % content_type\n raise TypeError(msg)\n return result\n\n def __call__(self, info):\n settings = info['settings']\n default_content_type = settings.get('default_content_type',\n 'application/json')\n\n def _render(value, system):\n request = system.get('request')\n content_type = request.accept.best_match(self.content_types,\n default_content_type)\n serializer = 
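# Editor's note: a hedged toy computation of why this alternating loss sequence defeats the greedy player - greedy always moves to the previous loss's minimizer and keeps paying for it. The slopes below mirror the lossfuncs construction; the simulation itself is illustrative.\nimport numpy as np\nL, T = 5.0, 200\na = np.array([L / 2] + [(-1) ** t * L for t in range(1, T)])   # first-coordinate slopes\nx, total = 0.5, 0.0\nfor t in range(T):\n    total += a[t] * x + 0.5 * L            # loss of greedy's current play\n    x = 0.0 if a[t] > 0 else 1.0           # greedy: minimizer of the last observed loss\nbest_fixed = min(np.sum(a * xx + 0.5 * L) for xx in (0.0, 1.0))  # affine, so a corner is optimal\nprint((total - best_fixed) / T)            # per-round regret stays near L/2 instead of vanishing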
self.get_serializer(content_type)\n return serializer(value, system)\n\n return _render\n","sub_path":"newtonian/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"393807335","text":"# -*- coding: utf-8 -*-\n\"\"\"\nScript for uploading securely built distributions (artifacts) to private\nDropbox directory.\nDropbox authorization token should be provided only as environment variable\nin a secure form. In case of CI systems (AppVeyor, Travis CI) this should\nbe provided as encrypted value in CI configuration file.\nWe prefer to use this method instead of native artifacts collection routine\nprovided by given CI system because it is more consistent.\n\"\"\"\n\nimport os\n\nimport dropbox\nimport dropbox.files\n\ndropbox_token = os.environ.get('DROPBOX_TOKEN')\n\ndbx = dropbox.Dropbox(dropbox_token)\n\n\nfor root, dirs, files in os.walk('dist'):\n for filename in files:\n local_path = os.path.join(root, filename)\n relative_path = os.path.relpath(local_path, 'dist')\n dropbox_path = \"/\" + relative_path\n\n with open(local_path, 'rb') as f:\n print(\"uploading %s\" % local_path)\n dbx.files_upload(\n f.read(), dropbox_path,\n dropbox.files.WriteMode('overwrite')\n )\n","sub_path":"ci/dropbox_upload.py","file_name":"dropbox_upload.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"619136701","text":"from datetime import datetime\nfrom decimal import Decimal\nimport geoip2\nimport geoip2.database\n\nfrom django.db import models\nfrom django.db.models import F, Sum\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\nfrom django.db.models.query import QuerySet\nfrom apps.stripe_payment.models import customer_get\nfrom apps.main.models import Category\nfrom django.conf import settings\n\n\nclass Ads(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL)\n campaign = models.CharField(max_length=100)\n title = models.CharField(max_length=100)\n description = models.TextField()\n image = models.ImageField(upload_to='ads_pic/')\n url = models.URLField(max_length=500)\n PPC = 1\n IMPRESSION = 2\n OPT_CHOICES = (('', \"Select One\"), (PPC, \"Pay Per Click\"), (IMPRESSION, \"Impressions\"))\n option = models.SmallIntegerField(\n verbose_name=_('Option'), null=True, choices=OPT_CHOICES)\n age_min = models.IntegerField(default=13)\n age_max = models.IntegerField(default=100)\n SEX_MALE = 1\n SEX_FEMALE = 2\n SEX_BOTH = 3\n SEX_CHOICES = (\n (SEX_MALE, \"Male\"), (SEX_FEMALE, \"Female\"), (SEX_BOTH, \"Both\"))\n sex = models.SmallIntegerField(\n verbose_name=_('Sex'), null=True, choices=SEX_CHOICES)\n country = models.CharField(max_length=20, blank=True)\n category = models.ManyToManyField(Category)\n start_date = models.DateTimeField(\n verbose_name=_('Start date'))\n end_date = models.DateTimeField(\n verbose_name=_('End date'))\n budget = models.DecimalField(max_digits=10, decimal_places=2)\n rate = models.DecimalField(max_digits=10, decimal_places=2,\n help_text=_('Cost per click/view, '\n 'will determine visibility '\n 'position'))\n is_active = models.BooleanField(help_text='Active?')\n\n class Meta:\n verbose_name = _(\"Ads\")\n verbose_name_plural = _(\"Ads\")\n ordering = ['-id']\n\n def __str__(self):\n return '%s' % self.campaign\n\n @classmethod\n def get_client_ip(cls, request):\n x_forwarded_for = 
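# Editor's note: files_upload() in dropbox_upload.py above is limited to about 150 MB per call; larger artifacts need the SDK's upload sessions. A hedged sketch (chunk size and the helper name are illustrative):\nimport os\nimport dropbox\nimport dropbox.files\nCHUNK = 8 * 1024 * 1024\ndef upload_large(dbx, local_path, dropbox_path):\n    size = os.path.getsize(local_path)\n    with open(local_path, 'rb') as f:\n        session = dbx.files_upload_session_start(f.read(CHUNK))\n        cursor = dropbox.files.UploadSessionCursor(session_id=session.session_id, offset=f.tell())\n        commit = dropbox.files.CommitInfo(path=dropbox_path, mode=dropbox.files.WriteMode('overwrite'))\n        if f.tell() == size:               # file fit in the first chunk\n            dbx.files_upload_session_finish(b'', cursor, commit)\n            return\n        while f.tell() < size:\n            if size - f.tell() <= CHUNK:   # last chunk closes the session\n                dbx.files_upload_session_finish(f.read(CHUNK), cursor, commit)\n            else:\n                dbx.files_upload_session_append_v2(f.read(CHUNK), cursor)\n                cursor.offset = f.tell()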
request.META.get('HTTP_X_FORWARDED_FOR')\n        if x_forwarded_for:\n            ip = x_forwarded_for.split(',')[0]\n        else:\n            ip = request.META.get('REMOTE_ADDR')\n        return ip\n\n    @classmethod\n    def get_country(cls, ip):\n        db = '%s/%s' % (settings.BASE_DIR, 'whosbetter/geo.mmdb')\n        reader = geoip2.database.Reader(db)\n        try:\n            response = reader.country(ip)\n        except geoip2.errors.AddressNotFoundError:\n            return False\n        return response.country\n\n    @classmethod\n    def get_country_code(cls, request):\n        ip = Ads.get_client_ip(request)\n        country = Ads.get_country(ip)\n\n        if country:\n            return country.iso_code\n        else:\n            return country\n\n    @classmethod\n    def get_ads(cls, request, category=None):\n        \"\"\"\n        Returns the ads based on criteria matched\n        \"\"\"\n\n        today = timezone.now().astimezone(timezone.get_default_timezone())\n\n        # sex, age_min, age_max, country, start_date, end_date, budget, category\n        if request.user and getattr(request.user, 'my_profile', None):\n            sex = request.user.my_profile.sex\n            birthdate = request.user.my_profile.birthdate\n            if birthdate:\n                age = int((today - birthdate).days / 365)\n                profile_found = True\n            else:\n                profile_found = False\n        else:\n            profile_found = False\n\n        country_code = Ads.get_country_code(request)\n\n        date = today.strftime('%Y-%m-%d')\n        sql = \"SELECT a.*, b.spend FROM ads_ads a \" \\\n              \"JOIN ads_ads_category c ON (a.id=c.ads_id) \" \\\n              \"LEFT JOIN ads_dailyspending b ON(a.id = b.ads_id AND b.date = '%s') \" \\\n              \"WHERE a.budget > CASE \" \\\n              \"WHEN b.spend is NULL \" \\\n              \"THEN 0 \" \\\n              \"ELSE b.spend \" \\\n              \"END \" \\\n              \"AND start_date<='%s' AND end_date>='%s' \" \\\n              \"AND is_active=1 \" % (date, date, date)\n\n        if profile_found:\n            sql = \"%s AND (a.sex='%s' OR a.sex=3) AND a.age_min <= '%s'\" \\\n                  \" AND a.age_max >= '%s'\" % (sql, sex, age, age)\n\n        if country_code:\n            # wrap the code in SQL LIKE wildcards, e.g. country LIKE '%US%'\n            sql = \" %s AND (country LIKE '%s' OR country='')\" % (\n                sql, '%' + country_code + '%')\n\n        # for category\n        if isinstance(category, Category):\n            sql = \" %s AND c.category_id='%s'\" % (sql, category.id)\n        elif isinstance(category, QuerySet):\n            sql_cat = ''\n            for cat in category:\n                sql_cat = \" %s c.category_id='%s' OR\" % (sql_cat, cat.id)\n            sql = ' %s AND (%s)' % (sql, sql_cat[:-2])\n            sql_cat = None\n\n        sql = '%s ORDER BY a.rate LIMIT 5' % sql\n        # Evaluate the raw query now so the ad objects are in hand\n        # before any spend/count logging updates the db.\n        ads = [ad for ad in Ads.objects.raw(sql)]\n        # DailySpending.update_log_bulk(ads, request)\n        return ads\n\n    @classmethod\n    def get_rank(cls, request_data, category=None):\n        \"\"\"\n        Returns the rank of a new ad, based on criteria\n        \"\"\"\n\n        today = timezone.now().astimezone(timezone.get_default_timezone())\n        date = datetime(\n            int(request_data.get('start_date_year', today.year)),\n            int(request_data.get('start_date_month', today.month)),\n            int(request_data.get('start_date_day', today.day)),\n            0,\n            0,\n            0,\n            0,\n        )\n\n        ads = cls.objects.filter(\n            age_min__gte=request_data.get('age_min', 1),\n            age_max__lte=request_data.get('age_max', 100),\n            start_date__gte=date,\n            is_active=True\n        )\n        sex = request_data.get('sex', 3)\n        if sex != 3:\n            ads = ads.filter(sex=sex)\n\n        country_code = request_data.get('country', None)\n        if country_code:\n            ads = ads.filter(country__contains=country_code)\n\n        if category:\n            ads = ads.filter(category=category)\n\n        ads = ads.order_by('-rate')\n\n        rank = 0\n        for ad in ads:\n            rank += 1\n            if ad.rate <= Decimal(request_data.get('rate', 0.1)):\n                break\n\n        if rank == 0:\n            rank = 1\n\n        return rank\n\n    
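# Editor's note: a hedged standalone sketch of the geoip2 lookup wrapped by get_country() above; the database path and test IP are illustrative.\nimport geoip2.database\nimport geoip2.errors\nwith geoip2.database.Reader('geo.mmdb') as reader:\n    try:\n        print(reader.country('8.8.8.8').country.iso_code)   # e.g. 'US'\n    except geoip2.errors.AddressNotFoundError:\n        print('IP not found in the GeoIP database')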
@staticmethod\n    def update_log_bulk(ads, request):\n        DailySpending.update_log_bulk(ads, request)\n\n    @classmethod\n    def charge_click(cls, request, ads_id):\n        try:\n            ad = Ads.objects.get(id=ads_id)\n            if ad.option == 1:\n                DailySpending.update_log(ad.id, request, 'PPC', ad.rate)\n        except Exception:\n            pass\n\n\nclass DailySpending(models.Model):\n    ads = models.ForeignKey(Ads)\n    spend = models.DecimalField(max_digits=10, decimal_places=2, default=0)\n    click_count = models.IntegerField(default=0, blank=True)\n    impression_count = models.IntegerField(default=0, blank=True)\n    date = models.DateField()\n\n    class Meta:\n        verbose_name = _('Daily Spending')\n        verbose_name_plural = _('Daily Spending')\n\n    def __str__(self):\n        return '%s' % self.id\n\n    @classmethod\n    def update_log_bulk(cls, ads, request):\n        for ad in ads:\n            if ad.option == 1:\n                cls.update_log(ad.id, request, 'IMPRESSION')\n            else:\n                cls.update_log(ad.id, request, 'IMPRESSION', ad.rate)\n\n    @classmethod\n    def update_log(cls, ads_id, request, log_type='PPC', rate=None):\n        today = timezone.now().astimezone(timezone.get_default_timezone())\n        date = today.strftime('%Y-%m-%d')\n\n        daily_spend, created = DailySpending.objects.get_or_create(\n            ads_id=ads_id,\n            date=date\n        )\n        if rate:\n            daily_spend.spend = daily_spend.spend + rate\n\n        landing_page = request.path\n        if log_type == 'PPC':\n            landing_page = request.META.get(\n                'HTTP_REFERER', landing_page).replace(\n                'http://' + request.META['HTTP_HOST'], '')\n            daily_spend.click_count = daily_spend.click_count + 1\n        else:\n            daily_spend.impression_count = daily_spend.impression_count + 1\n\n        daily_spend.save()\n\n        log = Log(\n            ads_id=ads_id,\n            datetime=today,\n            landing_page=landing_page,\n            remote_ip=Ads.get_client_ip(request),\n            remote_agent=request.META['HTTP_USER_AGENT']\n        )\n        log.save()\n\n\nclass Log(models.Model):\n    ads = models.ForeignKey(Ads)\n    datetime = models.DateTimeField()\n    landing_page = models.CharField(max_length=250)\n    remote_ip = models.GenericIPAddressField()\n    remote_agent = models.TextField()\n\n    def __str__(self):\n        return '%s' % self.id\n\n\ndef billing_exists(user):\n    try:\n        customer_get(user)\n        return True\n    except Exception:\n        return None\n\nmsg_no_billing = 'Please set up billing information before posting an ad'\n","sub_path":"apps/ads/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"200575265","text":"import sys\nif __name__ == '__main__':\n    args = sys.argv[1:]\n\n    site_name = args[1]\n\n    # Gotta stitch them domains up!\n    domains = ' '.join(args[2:])\n\n    config = ''\n    with open('default_nginx') as default_file:\n        reader = default_file.readlines()\n        for line in reader:\n            config += line\n\n    # NGINX config uses { } blocks itself, which clashes with str.format, so plain replace is used instead\n    config = config.replace('{root_path}', args[0])\n    config = config.replace('{log_path}', '/var/log/' + site_name)\n    config = config.replace('{domains}', domains)\n\n    with open(site_name, 'w') as new_file:\n        new_file.write(config)\n\n    exit()\n","sub_path":"ubuntu-18-04/lemp/new_nginx.py","file_name":"new_nginx.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"367740402","text":"import Source.utils\nimport time\n\n# Main\n\nprint(\"Please enter the command\")\nparameters = input() # IDc#ADc#IDv#TicketTgs\n\ntry:\n    if parameters.split(' ')[0] != \"tgt\":\n        raise 
ValueError(\"La commande n'existe pas\")\n if parameters.count(\"#\") != 4:\n raise ValueError(\"Nombre d'argument incorrect\")\n\n params = parameters.split(' ')[1]\n tabParameters = params.split('#')\n\n IDc = Source.utils.findUser(tabParameters[0], \"cles.txt\")\n if IDc == False:\n raise ValueError(\"Utilisateur introuvable\")\n\n ADc = tabParameters[1]\n IDv = Source.utils.findUser(tabParameters[2], \"cles.txt\")\n if IDv == False:\n raise ValueError(\"Serveur (V) introuvable\")\n\n ticketTgs = tabParameters[3]\n\n Source.utils.write(\"var\", IDv[0] + \" \" + IDv[1], \"a+\")\n\n print(\"Veuillez entrer votre mot de passe\")\n mdp = input()\n\n ticketVerif = Source.utils.dechiffrement(mdp, ticketTgs)\n\n IDtgs = Source.utils.readByLine(\"var\", 1)\n IDtgsSplit = IDtgs.split(\" \")\n\n verificationTgs = Source.utils.dechiffrement(IDtgsSplit[1], ticketVerif)\n verificationTgsPart = verificationTgs.split(\"#\")\n\n if ADc != verificationTgsPart[1]:\n raise ValueError(\"Adresse client incorrecte\")\n if time.time() - float(verificationTgsPart[3]) > float(verificationTgsPart[4]):\n raise ValueError(\"TICKET EXPIRE\")\n if IDtgsSplit[0] != \"TGS1\":\n raise ValueError(\"Mauvais TGS\")\n\n ticketV = Source.utils.chiffrement(IDv[1], IDc[0] + \"#\" + ADc + \"#\" + IDv[0] + \"#\" + str(int(time.time())) + \"#\" + tabParameters[4])\n print(ticketV[0])\n\nexcept ValueError as err:\n print(err)\n\n","sub_path":"Source/tgs.py","file_name":"tgs.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"618207440","text":"primes = []\nlimit = 100\n\nfor n in range(2, limit + 1):\n is_prime = True\n for divisor in range(2, n):\n if n % divisor == 0:\n is_prime = False\n break # Breaks out of CURRENT loop, continues to the next statement\n if is_prime: #\n primes.append(n)\n\nprint(primes)\n","sub_path":"work/examples/prime_generator.py","file_name":"prime_generator.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"602908496","text":"import time\nimport struct\nimport socket\nimport sys\nimport select\nimport signal\n\n\nfrom Crypto.Util.number import getRandomNumber\nfrom Crypto.Cipher import AES\nimport database\nfrom logger import logger as log\n\nfrom macros import *\nfrom inspect import currentframe\n\n# importing rsa keys\nimport rsa_keys\n\nreq_list = {} # saves requested list data, EntityName: pubkey, nonce\n\nLOGGING = False\nDEBUG = True\nauthName = \"auth1\"\nauthNameLength = len(authName)\ncertificate = \"Certificate file\" # give RSA certificate file here as text\ncertificateLength = len(certificate)\n\nAUTHPORT = 5555\nTIMER = 5\nBUFFER = 1024\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\ndef getHash(data: bytes, key: bytes, iv: bytes):\n data_len = len(data) // 8\n cbc_hash = AES.new(key, AES.MODE_CBC, iv)\n data = cbc_hash.encrypt(data)\n data_int = int.from_bytes(data, 'big')\n hash_value = data_int & 0xffffffffffffffff\n for i in range(0, data_len-1):\n data_int = data_int >> 64\n hash_value = hash_value ^ (data_int & 0xffffffffffffffff)\n return hash_value.to_bytes(8, 'big')\n\n\ndef process_padding(data: bytes = ''):\n # making string 32 bit aligned\n data_length = len(data)\n data_rest = data_length % 16\n if data_rest != 0:\n data = data.ljust(data_length + 16 - data_rest, b'\\0')\n return data\n\n\ndef str2ip(ip_str: str):\n ip = ip_str.split('.')\n if not len(ip) == 
4:\n raise ValueError(\"IP address is not in the proper format (i.e. x.x.x.x)\")\n for byte in ip:\n if int(byte) not in range(0, 256):\n raise ValueError(\"IP address values are not proper (i.e. between 0.0.0.0 and 255.255.255.255)\")\n return struct.pack('4B',int(ip[0]),int(ip[1]),int(ip[2]),int(ip[3]))\n\n\ndef ip2str(ip: bytes):\n if len(ip) > 4:\n raise ValueError(\"IP address is not proper (i.e. 32-bit in length)\")\n ip_str = str(ip[0]) +'.'+ str(ip[1]) +'.'+str(ip[2]) +'.'+str(ip[3])\n return ip_str\n\n\ndef rsa_encrypt(key: int, resp: bytes):\n # raw RSA block encryption (127-byte blocks, zero padded)\n resp_length = len(resp)\n data_length = resp_length // 127\n if resp_length % 127:\n data_length = data_length + 1\n for i in range(0, (127-resp_length%127)):\n resp = resp + b'\\0'\n\n data = b''\n for i in range(0, data_length):\n x = int.from_bytes(b'\\0'+resp[i*127:(i*127)+127], 'big')\n x = pow(x, rsa_keys.public_expo, key)\n data = data + x.to_bytes(128, 'big')\n\n return data\n\n\ndef rsa_decrypt(resp: bytes):\n # raw RSA block decryption (inverse of rsa_encrypt)\n resp_length = len(resp)\n if (resp_length % 128) != 0:\n print(\"Invalid cipher size\")\n return None\n data_length = resp_length // 128\n\n data = b''\n for i in range(0, data_length):\n x = int.from_bytes(resp[(i*128): (i*128) + 128], 'big')\n x = pow(x, rsa_keys.private_expo, rsa_keys.modulus)\n x = x.to_bytes(128, 'big')\n data = data + x[1:128]\n\n return data\n\ndef rsa_verify(key: int, cert: int):\n if key >= 1<<1024:\n return False\n if cert >= 1<<1024:\n return False\n\n genkey = pow(cert, rsa_keys.public_expo, rsa_keys.root_modulus)\n\n if genkey == key:\n return True\n\n return False\n\n\n\n\ndef aes_encrypt(EntityName: str, data: bytes):\n key = database.getDistKey(EntityName)\n iv = getRandomNumber(128)\n if DEBUG:\n print(\"Encrypting data for \" + EntityName)\n print(\"Using AES key: %s\\nand iv : %s\" % (hex(key), hex(iv)))\n # AES-CBC encrypt; prepend the IV and the CBC-based integrity hash\n iv = iv.to_bytes(16, 'big')\n key = key.to_bytes(16, 'big')\n cbc = AES.new(key, AES.MODE_CBC, iv)\n data = process_padding(data)\n data = cbc.encrypt(data)\n hash_code = getHash(data=data, key=key, iv=iv)\n return iv + hash_code + data\n\n\ndef aes_decrypt(EntityName: str, data: bytes, ip_addr: tuple):\n key = database.getDistKey(EntityName)\n if key:\n if DEBUG:\n print(\"Decrypting data from \" + EntityName)\n print(\"Using AES key: %s\" % hex(key))\n # AES decryption: verify the CBC-based hash, then dispatch on the flag byte\n key = key.to_bytes(16, 'big')\n data = data[1:] # strip the ENCPTD flag byte\n iv = data[0:16]\n hash_value1 = data[16:24]\n data = process_padding(data[24:])\n hash_value2 = getHash(data=data, key=key, iv=iv)\n if hash_value1 == hash_value2:\n cbc = AES.new(key, AES.MODE_CBC, iv)\n data = cbc.decrypt(data)\n flag = data[0]\n function_pointer[flag](EntityName=EntityName, data=data, ip_addr=ip_addr)\n else:\n print(\"Data from %s got corrupted\" % EntityName)\n if DEBUG:\n print(hex(int.from_bytes(hash_value1, 'big')))\n print(hex(int.from_bytes(hash_value2, 'big')))\n print(hex(int.from_bytes(iv, 'big')))\n cbc = AES.new(key, AES.MODE_CBC, iv)\n data = cbc.decrypt(data)\n print(data)\n return\n\n# nonce handler\ndef nonceHandler(EntityName: str, data: bytearray, ip_addr: tuple):\n data = data[1:]\n data = rsa_decrypt(data[0:128])\n if not data:\n return\n nonceGen = data[0:16]\n nonceGot = data[16:32]\n print(\"Verifying Nonce got back from %s\" % EntityName)\n if DEBUG:\n print(\"Got nonce1: %s\" % hex(int.from_bytes(nonceGen, 'big')))\n print(\"Got nonce2: %s\" % hex(int.from_bytes(nonceGot, 'big')))\n valid = False\n nonceGen = 
int.from_bytes(nonceGen, 'big')\n if nonceGen == req_list[EntityName]['nonce']:\n print(\"Nonce verified for %s as **valid**\" % EntityName)\n valid = True\n else:\n print(\"Nonce verified for %s as **invalid**\" % EntityName)\n print(\"Sent nonce: %s\" % hex(req_list[EntityName]['nonce']))\n req_list.pop(EntityName)\n if valid:\n if not database.isEntityReg(EntityName):\n ip, port = ip_addr\n groupName = req_list[EntityName]['groupname']\n validUntil = time.time() + (3600 * database.getValidity(groupName))\n print(\"Generating Distribution Key\")\n distKey = getRandomNumber(AESKEYSIZE)\n pubkey = req_list[EntityName]['pubkey']\n print(\"Adding %s to EntityTable\" % EntityName)\n database.addElement(EntityName=EntityName,\n GroupName=groupName,\n PublicKey=pubkey,\n DistKey=distKey,\n ValidUntil=validUntil,\n ip=ip,\n port=port)\n req_list.pop(EntityName)\n else:\n print(\"%s is already registered\" % EntityName)\n distKey = database.getDistKey(EntityName)\n pubkey = database.getPubKey(EntityName)\n print(\"Distkey: %s\" % hex(distKey))\n # send registration ack\n distKey = distKey.to_bytes(16, 'big')\n resp = rsa_encrypt(key=pubkey, resp=nonceGot+distKey)\n msg = struct.pack(\"!%dscB128s128s128s\" % authNameLength,\n authName.encode(), sep, ACPTREG,\n rsa_keys.modulus.to_bytes(128, 'big'),\n rsa_keys.cert.to_bytes(128, 'big'),\n resp)\n print(\"Sending nonce back and distkey to %s\" % EntityName)\n serverSocket.sendto(msg, ip_addr)\n print()\n return\n else:\n print(\"Sending reject message to %s\" % EntityName)\n reject = rsa_encrypt(req_list[EntityName]['pubkey'], RJCTREG.to_bytes(1,'big'))\n resp = struct.pack(\"!%dscB128s128s128s\" % authNameLength,\n authName.encode(), sep, RJCTREG,\n rsa_keys.modulus.to_bytes(128, 'big'),\n rsa_keys.cert.to_bytes(128, 'big'),\n reject\n )\n serverSocket.sendto(resp, ip_addr)\n req_list.pop(EntityName)\n print()\n return\n# registration request handler\ndef reqregHandler(EntityName: str, data: bytearray, ip_addr: tuple):\n groupNameLength = data[1]\n groupName = data[2:2+groupNameLength].decode()\n publicKey_starts = 2 + groupNameLength\n publicKey = data[publicKey_starts: publicKey_starts+128]\n publicKey = int.from_bytes(publicKey, 'big')\n entityCert_starts = publicKey_starts+128\n entityCert = data[entityCert_starts: entityCert_starts + 128]\n entityCert = int.from_bytes(entityCert, 'big')\n\n if DEBUG:\n print(\"Processing Registration request for %s\" % EntityName)\n print(\"Group Name:\", groupName)\n print(\"IP: %s Port: %d\" % ip_addr)\n print(\"Present public module by \" + EntityName)\n temp = hex(publicKey).split('x')[1].zfill(128)\n for i in range(0,4):\n print(\"\\t%s\" % temp[32*i:(32*i)+32])\n print(\"Presented certificate by \" + EntityName)\n temp = hex(entityCert).split('x')[1].zfill(128)\n for i in range(0,4):\n print(\"\\t%s\" % temp[32*i:(32*i)+32])\n\n print(\"Validating certificate given by \" + EntityName)\n valid = False\n # Validating certificate here\n if rsa_verify(key=publicKey, cert=entityCert):\n print(\"Certificate verified as **valid**\")\n valid = True\n else:\n print(\"Certificate verified as **invalid**\")\n print(\"Admin shall be informed\")\n\n isGroup = database.getValidity(groupName)\n if not isGroup:\n valid = False\n print(\"GroupName is not in group table\")\n\n if valid:\n if DEBUG:\n print(\"Generating 128bit Nonce\")\n nonceGen = getRandomNumber(AESKEYSIZE)\n print(\"nonce = %s\" % hex(nonceGen))\n new_req = {}\n new_req.update({'nonce': nonceGen})\n new_req.update({'pubkey': publicKey})\n 
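# Editor's note: this pending entry is what nonceHandler checks later - the\n # entity proves it holds the matching private key by echoing the nonce back.\n 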
new_req.update({'groupname': groupName})\n req_list.update({EntityName: new_req})\n nonceGen = nonceGen.to_bytes(16, 'big')\n nonceGen = rsa_encrypt(publicKey, nonceGen)\n resp = struct.pack(\"!%dscB128s128s128s\" % authNameLength,\n authName.encode(), sep, NONCE,\n rsa_keys.modulus.to_bytes(128, 'big'),\n rsa_keys.cert.to_bytes(128, 'big'),\n nonceGen\n )\n serverSocket.sendto(resp, ip_addr)\n if DEBUG:\n print(\"Nonce with credentials is sent to %s\" % EntityName)\n\n else:\n print(\"Sending reject message to %s\" % EntityName)\n reject = rsa_encrypt(publicKey, int.to_bytes(RJCTREG,1,'big'))\n resp = struct.pack(\"!%dscB128s128s128s\" % authNameLength,\n authName.encode(), sep, RJCTREG,\n rsa_keys.modulus.to_bytes(128, 'big'),\n rsa_keys.cert.to_bytes(128, 'big'),\n reject\n )\n serverSocket.sendto(resp, ip_addr)\n print()\n return\n\n\ndef reqAccessHandler(EntityName: str, data: bytes, ip_addr: tuple):\n Entity2length = data[1]\n Entity2Name = data[2:2+Entity2length].decode()\n reqTime = struct.unpack(\"!I\", bytes(data[2+Entity2length:2+Entity2length+4]))[0]\n\n print(\"Processing Access request from %s to %s for %dm\" % (EntityName, Entity2Name, reqTime))\n grantedTime = database.getAccess(FromEntityName=EntityName,\n ToEntityName=Entity2Name,\n reqTime=reqTime)\n if grantedTime:\n print(\"%s is granted access to %s for %sm time\" % (EntityName, Entity2Name, grantedTime))\n allowedUntil = time.time() + (60*grantedTime)\n\n sessionKey = database.getSessionKey(FromEntityName=Entity2Name,\n ToEntityName=EntityName)\n if not sessionKey:\n sessionKey = getRandomNumber(AESKEYSIZE)\n\n database.addSession(FromEntityName=EntityName,\n ToEntityName=Entity2Name,\n AllowedUntil=allowedUntil,\n key=sessionKey)\n\n\n if DEBUG:\n print(\"Generated Session key: %s\" % hex(sessionKey))\n\n sessionKey1 = ((sessionKey >> 64) & ((1 << 64)-1))\n sessionKey2 = (sessionKey & ((1 << 64)-1))\n\n ip, port = database.getIPaddr(EntityName)\n ip2, port2 = database.getIPaddr(Entity2Name)\n if not ip2 or not port2 or not ip or not port:\n print(\"Some problem with database\")\n exit(10)\n\n print(\"Sending session key to %s\" % EntityName)\n data = struct.pack((\"!BB%dsI4sHQQ\" % Entity2length),\n ACPTACC, Entity2length,\n Entity2Name.encode(), int(grantedTime),\n str2ip(ip2), port2,\n sessionKey1, sessionKey2)\n msg = aes_encrypt(EntityName=EntityName, data=data)\n msg = struct.pack((\"!%dscB\" % authNameLength), authName.encode(), sep, ENCPTD) + msg\n serverSocket.sendto(msg, (ip, port))\n\n print(\"Sending acknowledgement with session key to %s\" % Entity2Name)\n EntityNameLength = len(EntityName)\n data = struct.pack((\"!BB%dsI4sHQQ\" % EntityNameLength),\n ACKACC, EntityNameLength,\n EntityName.encode(), int(grantedTime),\n str2ip(ip), port,\n sessionKey1, sessionKey2)\n msg = aes_encrypt(EntityName=Entity2Name, data=data)\n msg = struct.pack((\"!%dscB\" % authNameLength), authName.encode(), sep, ENCPTD) + msg\n serverSocket.sendto(msg, (ip2, port2))\n\n else:\n print(\"%s is denied access to %s\" % (EntityName, Entity2Name))\n print(\"Sending reject message to %s\" % EntityName)\n data = struct.pack((\"!BB%ds\" % Entity2length),\n RJCTACC, Entity2length,\n Entity2Name.encode())\n msg = aes_encrypt(EntityName=EntityName, data=data)\n msg = struct.pack((\"!%dscB\" % authNameLength), authName.encode(), sep, ENCPTD) + msg\n serverSocket.sendto(msg, ip_addr)\n\n print()\n return\n\n\ndef ackAuthHandler(EntityName: str, data: bytearray, ip_addr: tuple):\n # Nothing to do here\n return\n\nlast_time = 
time.time()\ndef timerHandler():\n global last_time\n #print(\"!!!Time-out!!!\")\n current_time = time.time()\n for entry in database.getSKValidUntil():\n if current_time >= entry[2]:\n print(\"Session timed-out for %s to %s\" % (entry[0], entry[1]))\n database.removeSession(entry[0], entry[1])\n\n if (current_time - last_time) > (60 * TIMER):\n last_time = current_time\n for entry in database.getDKValidUntil():\n if current_time >= entry[1]:\n print(\"DistKey timed-out for %s\" % (entry[0]))\n database.removeElement(entry[0])\n \n return\n\n\ndef main():\n database.createTables()\n serverSocket.bind(('', AUTHPORT))\n\n fd = sys.stdin.fileno()\n #signal.signal(signal.SIGALRM, timerHandler)\n #signal.alarm(5)\n print()\n while True:\n try:\n readable, writable, excep = select.select([serverSocket, fd], [], [], TIMER)\n except KeyboardInterrupt:\n print(\"Terminating Auth Server...\")\n serverSocket.close()\n sys.exit(0)\n except Exception:\n print(\"Unknown exception occurred\")\n continue\n else:\n if fd in readable:\n inputLine = input()\n inputLine = str(inputLine).split(' ')\n\n if serverSocket in readable:\n data, addr = serverSocket.recvfrom(BUFFER)\n splitedData = data.split(sep)\n EntityName = splitedData[0].decode()\n EntityNameLength = len(EntityName)\n data = data[EntityNameLength+1:]\n flag = data[0]\n function_pointer[flag](EntityName=EntityName, data=data, ip_addr=addr)\n\n timerHandler()\n # Loop, printing any data we receive\n\n# Map of message-type flags to their handler functions\nfunction_pointer = {ENCPTD: aes_decrypt,\n REQREG: reqregHandler,\n REQACC: reqAccessHandler,\n ACKAUTH: ackAuthHandler,\n NONCE : nonceHandler}\nif __name__ == '__main__':\n\n main()\n\n'''\npacket formats = entity name:status[8bit]:data[variable] CoSV(Colon-Separated Values) format\n[plaintext] # entity name : REQREG[8bit] : type[8bit] : group name : length of cert in bytes[32bit] : Certificate\n[RSA] # auth name : RJCTREG[8bit] : length of cert in bytes[32bit] : Certificate\n[RSA] # auth name : ACPTREG[8bit] : length of cert in bytes[32bit] : Certificate : dist key\n[AES-GCM] # entity name : REQACC[8bit] : length of entity name : entity name : access time in minutes (requested)\n[AES-GCM] # auth name : RJCTACC[8bit] : length of entity name : entity name\n[AES-GCM] # auth name : ACPTACC[8bit] : length of entity name : entity name : access time in minutes (granted) : session key\n[AES-GCM] # entity name : REQCOMM[8bit] : data to and from application\n[AES-GCM] # entity name : RESPCOMM[8bit]: data to and from application\n\nall encrypted message formats\n # entity/auth name : ENCPTD[8bit] : Encrypted message in above format without entity/auth name\n'''\n","sub_path":"auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":17315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"527004682","text":"import sqlite3 as sq\ntry:\n\tconn = sq.connect('database.db')\n\tcursor = conn.cursor()\n\n\tcursor.execute(\"\"\"CREATE TABLE users\n\t\t(id text, username text, is_admin text)\n\t\t\"\"\")\n\tconn.commit()\n\tprint('Good_users_db')\nexcept sq.OperationalError:\n\tprint('The Users DB has already been created; to rebuild it, delete database.db and run create_db.py again')\n\n\n","sub_path":"create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"523715077","text":"#!/usr/bin/python3\n\nfrom PyPDF2 import PdfReader, PdfWriter\nfrom matplotlib import pyplot as plt\nfrom 
matplotlib import rcParams\nfrom sys import argv\nfrom pathlib import Path\nimport textwrap\n\n\n# support yahei\nrcParams['pdf.fonttype'] = 42\nrcParams['font.family'] = 'Microsoft Yahei'\nrcParams['font.size'] = 80\n\n\n# def generate_watermark(text: str) -> Path:\n# from reportlab.pdfbase import pdfmetrics, ttfonts\n# from reportlab.platypus import Paragraph\n# from reportlab.pdfgen import canvas\n# from reportlab.lib.styles import ParagraphStyle\n# from reportlab.lib.colors import Color\n# from reportlab.graphics.charts.textlabels import Label\n# from reportlab.graphics.shapes import Drawing, String\n# wm_file = Path('rl_watermark.pdf').absolute()\n# # register YaHei font\n# pdfmetrics.registerFont(ttfonts.TTFont('yahei', 'msyh.ttc'))\n# transparent_blue = Color(0, 115, 255, alpha=0.2)\n# # may overwrite\n# # set style\n# style = ParagraphStyle('normal',\n# fontName='yahei', fontSize=70, leading=70*1.5,\n# textColor=transparent_blue,)\n# # wordWrap='LTR')\n#\n# p = Paragraph(text, style)\n# c = canvas.Canvas(str(wm_file))\n# c.setFont('yahei', 70)\n# c.setFillColor(transparent_blue)\n# w, h = c._pagesize\n# width, height = 10, 10\n# degree = 45\n# # c.rotate(degree)\n# ww, wh = p.wrapOn(c, w, h)\n# # p.drawOn(c, ww, wh)\n# p.drawOn(c, 0, h/2-(wh/2))\n# c.save()\n# return wm_file\n\n\ndef generate_watermark(text: str) -> Path:\n wm_file = Path('rl_watermark.pdf').absolute()\n # a4\n fig = plt.figure(1, figsize=(8.27, 11.69), dpi=72)\n wrap_text = textwrap.fill(text, width=10)\n fig.text(0.05, 0.05, wrap_text, rotation=45, alpha=0.2, color=(0, 0.5, 1),\n rasterized=True)\n plt.savefig(wm_file, transparent=True)\n return wm_file\n\n\ndef add_mark(pdf: Path, mark: 'Path or str'):\n # print('Usage: python3 add_watermark.py original.pdf watermark.pdf')\n # print('Or:')\n # print('Usage: python3 add_watermark.py original.pdf watermark_text')\n original = Path(pdf)\n output = original.absolute().parent / ('new-'+original.name)\n watermark = Path(mark)\n if isinstance(mark, Path):\n pass\n else:\n watermark = generate_watermark(mark)\n\n wm_obj = PdfReader(str(watermark))\n wm_page = wm_obj.pages[0]\n\n reader = PdfReader(str(original))\n writer = PdfWriter()\n\n for index in range(len(reader.pages)):\n page = reader.pages[index]\n page.merge_page(wm_page)\n writer.add_page(page)\n\n with open(output, 'wb') as out:\n writer.write(out)\n watermark.unlink()\n print('Done.')\n return output\n\nif __name__ == '__main__':\n add_mark(argv[1], argv[2])\n","sub_path":"pdf/add_watermark.py","file_name":"add_watermark.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"426781139","text":"import json\n\nclass ConfigurationFile(object):\n def __init__(self):\n # initialize the class variables\n self._status = None\n self.data = {}\n # load file\n self.status\n\n @property\n def load_config_file(self):\n '''\n Loads JSON based config file and parses it into a dictionary \"Data\".\n\n '''\n # try to open file\n try:\n file = open('configuration', 'r')\n config_str = file.read()\n # parse file to JSON\n self.data = json.loads(config_str)\n file.close()\n return True\n except (OSError, ValueError) as e:\n return False\n\n @property\n def status(self):\n \"\"\"\n Returns \"False\" if the file didn't load correctly or important keys are missing.\n\n \"\"\"\n\n # Load file and check if it exists\n self._status = False\n if self.load_config_file is False:\n return self._status\n\n # check if necessary keys exist in 
dictionary\n if not all(key in self.data for key in (\"dev_eui\", \"owner_id\", \"calibration_value\", \"guest_id\", \"location\", \"session_id\")):\n return self._status\n\n # Config File seems to be ok!\n self._status = True\n return self._status\n\n def update(self, key, value):\n \"\"\"\n Updates a key-specific value in the config dictionary and loads that\n dict into the config file.\n\n \"\"\"\n # change value in dict\n self.data[key] = value\n\n try:\n # Dictionary into JSON\n config_str = json.dumps(self.data)\n # Open file and write JSON to it\n f = open('configuration', 'w')\n f.write(config_str)\n f.close()\n\n # Something went wrong (status is a read-only property, so set the backing field)\n except (ValueError, OSError):\n self._status = False\n\n def update_many(self, entries):\n \"\"\"\n loads all key-specific values from \"entries\" into the config dict and\n config file.\n\n \"\"\"\n for key, value in entries.items():\n self.update(key, value)\n\n def initialize(self):\n \"\"\"\n This method is used to initialize the config dict and file.\n\n \"\"\"\n new_config = {\"dev_eui\": \"None\", \"owner_id\": \"None\", \"location\": \"None\", \"guest_id\": \"None\",\n \"calibration_value\": 0, \"session_id\": 0, \"tx_delaytime\": 0, \"last_transmission\": None,}\n self.data[\"msg_queue\"] = []\n self.update_many(new_config)\n\n def enqueue_msg(self, msg):\n \"\"\"\n Puts msg into the Message Queue\n\n \"\"\"\n self.data[\"msg_queue\"].append(msg)\n self.update(\"msg_queue\", self.data[\"msg_queue\"])\n\n def dequeue_msg(self):\n \"\"\"\n Deletes first entry of Message Queue\n\n \"\"\"\n self.data[\"msg_queue\"].pop(0)\n self.update(\"msg_queue\", self.data[\"msg_queue\"])\n\n @property\n def queue_status(self):\n \"\"\"\n Returns True if queue is not empty.\n\n \"\"\"\n if not self.data[\"msg_queue\"]:\n return False\n return True\n\n @property\n def is_configured(self):\n \"\"\"\n Returns True if the node is configured.\n\n \"\"\"\n if (self.data[\"dev_eui\"] == \"None\" or self.data[\"owner_id\"] == \"None\" or self.data[\"location\"] == \"None\" or self.data[\"last_transmission\"] == 0 or self.data[\"tx_delaytime\"] == 0):\n return False\n return True\n\n @property\n def is_calibrated(self):\n \"\"\"\n Returns True if the node is calibrated.\n\n \"\"\"\n if (self.data[\"calibration_value\"] != 0):\n return True\n return False\n\n @property\n def is_occupied(self):\n \"\"\"\n Returns True when the parking space is occupied.\n \"\"\"\n if self.data[\"guest_id\"] == \"None\":\n return False\n return True\n\n def transmission_message(self, mode):\n \"\"\"\n Returns payload for a lora transmission.\n\n \"\"\"\n _msg_dict = {}\n _msg_dict[\"msg_type\"] = mode\n if mode == \"registration\":\n _msg_dict[\"owner_id\"] = self.data[\"owner_id\"]\n _msg_dict[\"location\"] = self.data[\"location\"]\n _msg_dict[\"session_id\"] = self.data[\"session_id\"]\n elif mode == \"update\":\n _msg_dict[\"guest_id\"] = self.data[\"guest_id\"]\n _msg_dict[\"time\"] = self.data[\"timestamp\"]\n _msg_dict[\"session_id\"] = self.data[\"session_id\"]\n return json.dumps(_msg_dict)\n\n\nif __name__ == '__main__':\n config = ConfigurationFile()\n config.initialize()\n config.update(\"dev_eui\", \"5E DA 34 01 70 E6 4D\")\n config.update(\"owner_id\", 1)\n config.update(\"location\", 1)\n config.update(\"calibration_value\", 0)\n print(\"Device is configured? 
{}\".format(config.is_configured))\n config.enqueue_msg(config.transmission_message(\"update\"))\n print(\"ConfigFile: {}\".format(config.data))\n print(config.queue_status)\n","sub_path":"lib/configuration_file.py","file_name":"configuration_file.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"113195689","text":"import glob\nimport json\nimport matplotlib.pyplot as plt\n\nfile_list = glob.glob('./models/*/*/*/logs/*.json')\nfile_list = [name for name in file_list if '5' in name]\n# file_list = [name for name in file_list if 'GAN-I' in name]\n# file_list = [name for name in file_list if 'R_0.1__' in name]\n# mode = 'full' # full, all\n# mode = 'test' # full, all\n# legend_lable = []\n\ncolormap = plt.cm.gist_ncar\n# plt.gca().set_color_cycle([colormap(i) for i in np.linspace(0, 0.9, len(file_list))])\nprint(file_list)\nfor name in file_list:\n with open(name, 'r') as f:\n returns = json.load(f)\n print(returns['models\\\\simple_tag\\\\log\\\\run5\\\\logs/agent0/mean_episode_rewards/reward'])\n\n\n# plt.legend(legend_lable)\n# plt.show()\n","sub_path":"show_result.py","file_name":"show_result.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"475577513","text":"'''\n1부터 10,000까지 8이라는 숫자가 총 몇번 나오는가?\n\n8이 포함되어 있는 숫자의 갯수를 카운팅 하는 것이 아니라 8이라는 숫자를 모두 카운팅 해야 한다.\n(※ 예를들어 8808은 3, 8888은 4로 카운팅 해야 함)\n'''\nnum = 0\nfor i in range(10000):\n for k in str(i):\n if '8' in str(k):\n num = num+1\nprint(num)\n \n","sub_path":"알고리즘/코딩도장/1부터10000까지 8찾기.py","file_name":"1부터10000까지 8찾기.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"141198485","text":"\"\"\"\nIntro to ML HW 7\nAudrey Zhang\nAndrew ID: youyouz\n\"\"\"\nimport numpy as np\nimport sys\n\n#%%\n\nclass GaussianNaiveBayes:\n def __init__(self, num_features = None):\n self.num_features = num_features\n self.priors = []\n self.means = []\n self.sigmas = []\n self.labels = []\n \n def train(self, train, train_labels, test, test_labels, train_out, test_out, metrics_out, num_voxels = None):\n \n self.labels = list(set(train_labels))\n \n means = np.empty((0, len(train[0])))\n sigmas = np.empty((0, len(train[0])))\n \n for l in range(len(self.labels)):\n self.priors.append(np.char.count(train_labels, self.labels[l]).sum() / len(train_labels))\n\n idx = np.argwhere(train_labels == self.labels[l])\n subset = train[idx]\n mu = np.mean(subset, axis=0)\n sigma = np.std(subset, axis=0)\n \n means = np.append(means, mu, axis = 0)\n sigmas = np.append(sigmas, sigma, axis = 0)\n \n if num_voxels is not None:\n\n diff = np.diff(means, axis = 0)\n \n top_k = np.argsort(-abs(diff))[0][:num_voxels]\n \n new_means = means[:, top_k]\n new_sigmas = sigmas[:, top_k]\n \n self.means = new_means\n self.sigmas = new_sigmas \n \n data = train[:, top_k]\n y_hat, y_pred = self.predict(data)\n \n test_data = test[:, top_k]\n \n test_y_hat, test_y_pred = self.predict(test_data)\n\n \n else:\n self.means = means\n self.sigmas = sigmas\n \n y_hat, y_pred = self.predict(train)\n \n test_y_hat, test_y_pred = self.predict(test)\n \n err_train = self.calc_error_rate(train_labels, y_pred)\n err_test = self.calc_error_rate(test_labels, test_y_pred)\n \n \n with open(train_out, 'w') as output:\n output.write('\\n'.join(list(y_pred)))\n \n with open(test_out, 'w') as output:\n 
output.write('\\n'.join(list(test_y_pred)))\n \n with open(metrics_out, 'w') as output:\n output.write(\"error(train): {:f}\\n\".format(err_train))\n output.write(\"error(test): {:f}\\n\".format(err_test))\n \n #return y_hat, y_pred\n return err_train, err_test\n \n \n def calc_error_rate(self, true, pred):\n \n correct = (true != pred).sum()\n return correct / len(true)\n \n def predict(self, data):\n \n labels_dict = dict(enumerate(self.labels))\n\n probabilities = np.empty((len(data), 0))\n \n for l in range(len(self.labels)):\n proba = self.calc_proba(data, l)\n proba = np.log(proba).sum(axis = 1) + np.log(self.priors[l])\n\n proba = proba.reshape((len(data), 1))\n probabilities = np.append(probabilities, proba, axis = 1) \n\n \n probabilities = np.array(probabilities)\n \n y_hat = np.argmax(probabilities, axis=1)\n y_pred = np.array(list(map(lambda x: labels_dict.get(x, None), y_hat)))\n \n return y_hat, y_pred\n \n \n def calc_proba(self, data, label):\n \n means = self.means[label]\n sigmas = self.sigmas[label]\n \n cons = 1 / np.sqrt(2 * np.pi * (sigmas**2))\n \n exp = np.exp(-((data - means) ** 2) / (2 * (sigmas **2)))\n \n proba = cons * exp\n \n return proba\n \n \n#%%\n\n \n\ndef main():\n \n # sys args:\n # train_input\n # test_input\n #train_out\n #test_out\n #metrics_out\n #num_voxels\n \n train_input = sys.argv[1]\n test_input = sys.argv[2]\n train_out = sys.argv[3]\n test_out = sys.argv[4]\n metrics_out = sys.argv[5]\n num_voxels = int(sys.argv[6])\n \n \n train= [] \n test = []\n\n with open(train_input, 'r') as file:\n for line in file:\n train.append(line.strip().split(','))\n \n with open(test_input, 'r') as file:\n for line in file:\n test.append(line.strip().split(','))\n \n cols = train[0]\n \n train = np.asarray(train)\n test = np.asarray(test)\n \n\n train_labels = train[1:, -1]\n test_labels = test[1:, -1]\n \n train = train[1:, :-1]\n test = test[1:, :-1]\n \n train = train.astype(float)\n test = test.astype(float)\n \n gnb = GaussianNaiveBayes(1) \n \n err_train, err_test = gnb.train(train, train_labels, test, test_labels, train_out, test_out, metrics_out, num_voxels = num_voxels)\n \n \nif __name__=='__main__': \n \n main()\n \n \n","sub_path":"gnb.py","file_name":"gnb.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"122261916","text":"from WMCore.Configuration import Configuration\nconfig = Configuration()\nconfig.section_('General')\nconfig.General.transferOutputs = True\nconfig.General.workArea = 'WORKINGAREA'\nconfig.General.requestName = 'WORKINGDIR'\nconfig.section_('JobType')\nconfig.JobType.psetName = 'CMSSWCFG'\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.outputFiles = ['OUTFILENAME']\nconfig.section_('Data')\nconfig.Data.inputDataset = 'INPUTDATASET'\nconfig.Data.unitsPerJob = FILESPERJOB #without '' since it must be an int\nconfig.Data.splitting = 'FileBased'\nconfig.Data.publication = False\nconfig.Data.outLFNDirBase = '/store/group/phys_exotica/dijet/Dijet13TeV/juska/Spring15_AK4cors_v2'\nconfig.section_('User')\nconfig.section_('Site')\nconfig.Site.storageSite = 'T2_CH_CERN'\nconfig.Site.blacklist = ['T2_UA_KIPT','T2_US_Caltech']\n","sub_path":"prod/submitJobsWithCrab3/old_inputs/Inputs_Spring15/crab3_template_juska.py","file_name":"crab3_template_juska.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"201125334","text":"#! 
/usr/bin/python3\r\n\r\nimport glob\r\nfrom lxml import etree\r\n\r\n\"\"\"Parse TCGA LUAD Clinical files to extract information\"\"\"\r\n\r\n__author__ = \"Nafisa Bulsara\"\r\n\r\n\r\nfor files in glob.glob('/path/to/files/*.xml'):\r\n clinical = open(files, 'r')\r\n tree = etree.parse(clinical)\r\n root = tree.getroot()\r\n root.find('./luad:tcga_bcr/luad:patient', root.nsmap)\r\n for rac in root.findall('luad:patient', root.nsmap):\r\n races = rac.find('clin_shared:race_list/clin_shared:race', root.nsmap).text\r\n race = \"\"\r\n if races == 'WHITE':\r\n race = races\r\n for elements in root.findall('luad:patient', root.nsmap):\r\n barcodes = elements.find('shared:bcr_patient_barcode', root.nsmap).text\r\n site = elements.find('clin_shared:tumor_tissue_site', root.nsmap).text\r\n histo_type = elements.find('shared:histological_type', root.nsmap).text\r\n other_dx = elements.find('shared:other_dx', root.nsmap).text\r\n genx = elements.find('shared:gender', root.nsmap).text\r\n try:\r\n karnofsky = int(elements.find('clin_shared:karnofsky_performance_score', root.nsmap).text)\r\n except:\r\n karnofsky = elements.find('clin_shared:karnofsky_performance_score', root.nsmap).text\r\n survive = elements.find('clin_shared:vital_status', root.nsmap).text\r\n kras_mut = elements.find('lung_shared:kras_mutation_found', root.nsmap).text\r\n smoker = elements.find('shared:tobacco_smoking_history', root.nsmap).text\r\n smoking_years = elements.find('clin_shared:number_pack_years_smoked', root.nsmap).text\r\n therapy_rad = elements.find('clin_shared:radiation_therapy', root.nsmap).text\r\n therapy_mol = elements.find('clin_shared:targeted_molecular_therapy', root.nsmap).text\r\n therapy_outcome = elements.find('clin_shared:primary_therapy_outcome_success', root.nsmap).text\r\n age_at_diagnosis=elements.find('clin_shared:age_at_initial_pathologic_diagnosis', root.nsmap).text\r\n drugs = elements.findall('.//rx:drug_name', root.nsmap)\r\n\r\n if len(drugs) >= 1:\r\n drug_name = \"\"\r\n for drug_names in drugs:\r\n drug_name += str(drug_names.text) + \" \"\r\n\r\n print((\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t\".format(race, barcodes, site, histo_type, other_dx, genx, karnofsky, kras_mut, smoker, smoking_years, therapy_rad, therapy_mol, therapy_outcome,age_at_diagnosis ,drug_name)))\r\n\r\n else:\r\n drug_name = 'null'\r\n print((\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t\".format(race, barcodes, site, histo_type, other_dx, genx, karnofsky, kras_mut, smoker, smoking_years, therapy_rad, therapy_mol, therapy_outcome,age_at_diagnosis ,drug_name)))\r\n","sub_path":"parse_tcga_clinical.py","file_name":"parse_tcga_clinical.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"138289103","text":"import minerl\nimport gym\n\nclass _Getch:\n \"\"\"Gets a single character from standard input. 
Does not echo to the\nscreen.\"\"\"\n def __init__(self):\n try:\n self.impl = _GetchWindows()\n except ImportError:\n self.impl = _GetchUnix()\n\n def __call__(self): return self.impl()\n\n\nclass _GetchUnix:\n def __init__(self):\n import tty, sys\n\n def __call__(self):\n import sys, tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n\nclass _GetchWindows:\n def __init__(self):\n import msvcrt\n\n def __call__(self):\n import msvcrt\n return msvcrt.getch()\n\n\ndef get_action(getch, action):\n\n key = getch()\n\n if key.lower() == \"w\":\n action[\"forward\"] = 1\n elif key.lower() == \"a\":\n action[\"left\"] = 1\n elif key.lower() == \"s\":\n action[\"back\"] = 1\n elif key.lower() == \"d\":\n action[\"right\"] = 1\n elif key.lower() == \"u\":\n action[\"attack\"] = 1\n elif key.lower() == \"i\":\n action[\"jump\"] = 1\n elif key.lower() == \"o\":\n action[\"sneak\"] = 1\n elif key.lower() == \"p\":\n action[\"sprint\"] = 1\n elif key.lower() == \"h\":\n action[\"camera\"] = [45, 0]\n elif key.lower() == \"j\":\n action[\"camera\"] = [-45, 0]\n elif key.lower() == \"k\":\n action[\"camera\"] = [0, 45]\n elif key.lower() == \"l\":\n action[\"camera\"] = [0, -45]\n # stop on ctrl+c or q press\n elif key == \"\\x03\" or key.lower() == \"q\":\n return \"quit\"\n # otherwise no action this step\n else:\n return action\n\n return action\n\nif __name__ == \"__main__\":\n env = gym.make('MineRLNavigateDense-v0')\n\n getch = _Getch()\n\n print(\"ACTIONS\")\n print(\"\"\"\n w - forward\n a - left\n s - backward\n d - right\n u - attack\n i - jump\n o - sneak\n p - sprint\n h - camera up\n j - camera down\n k - camera right\n l - camera left\n \"\"\")\n\n obs, _ = env.reset()\n done = False\n net_reward = 0\n\n while not done:\n default_action = env.action_space.noop()\n action = get_action(getch, default_action)\n\n obs, reward, done, info = env.step(action)\n\n net_reward += reward\n print(\"Reward {}\".format(net_reward))","sub_path":"simple_agents/keyboard_agent.py","file_name":"keyboard_agent.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"66176676","text":"import numpy as np\nimport math\n\ndef uniqueChar(s1):\n \"\"\" Solve problem.\n Implement a alg that determines if a\n string is all unique characters.\n \"\"\"\n # Initialize hash\n dict = {}\n\n # Check each character. 
If we find a duplicate, return False\n # Otherwise, add to dictionary and continue\n for c in s1:\n if c in dict:\n return False\n else:\n dict[c] = 1\n\n # Didn't fail, so must be unique\n return True\n\n\ndef uniqueCharNoHash(s1):\n \"\"\"Check for unique characters without using any new structures.\"\"\"\n s = list(s1)\n\n # Sort the list\n s = sorted(s)\n\n # Now step through the list and check the next character\n for ii in range(len(s)-1):\n if s[ii] == s[ii+1]:\n return False\n\n # If we're still here, must be unique\n return True\n\ndef isAnagram(s1, s2):\n \"\"\" Determine if s1 and s2 are anagrams.\"\"\"\n\n # Anagrams contain exactly the same characters, so compare sorted copies\n # (comparing s1 against the reverse of s2 only detects one specific ordering).\n return sorted(s1) == sorted(s2)\n\n\ndef rotateMatrix(matrix):\n \"\"\"Rotate matrix 90 degrees\"\"\"\n N = matrix.shape[0]\n\n for i in range(math.floor(N/2)+1):\n last = N - 1 -i # Last element used in this layer\n for j in range(i, N-i-1):\n offset = j-i\n temp = matrix[i,j]\n\n matrix[i,j] = matrix[last - offset, i]\n matrix[last - offset, i] = matrix[last, last-offset]\n matrix[last, last-offset] = matrix[j, last]\n matrix[j,last] = temp\n\n print(matrix)\n\ndef setRowColZero(matrix):\n \"\"\" If an element is zero, set its row and column to zero\"\"\"\n icol = []\n jcol = []\n\n M = matrix.shape[0]\n N = matrix.shape[1]\n\n for ii in range(M):\n for jj in range(N):\n if matrix[ii,jj] == 0:\n icol.append(ii)\n jcol.append(jj)\n\n # If we saw no zeros, just return original matrix\n if len(icol) == 0:\n return matrix\n\n for ii, jj in zip(icol, jcol):\n matrix[:,jj] = 0\n matrix[ii,:] = 0\n\n return matrix\n\n\n\nif __name__ == '__main__':\n \"\"\"Test\"\"\"\n a = np.array([[1, 2, 3], [2, 0, 4], [0, 5, 6]])\n print(a)\n print(setRowColZero(a))\n","sub_path":"InterviewPrep/arrayString.py","file_name":"arrayString.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"522319986","text":"from os import path\n\n# game options/settings\nTITLE = \"Pit Jumper\"\nWIDTH = 1200\nHEIGHT = 600\nFPS = 60\nFONT_NAME = 'arial'\nHS_FILE = \"highscore.txt\"\nSPRITESHEET = \"spritesheet_jumper.png\"\nBACKGROUNDTITLE = \"pitjumpertitle.png\"\nGAME_BG = \"sky.png\"\n\n# Sound properties\nSOUND = path.join('./sound')\nGAME_TRACK = 'Venus.wav'\nPAUSE_TRACK = 'Mars.wav'\nGAME_OVER = 'Win Jingle.wav'\nLOOP = -1\n\n# Player properties\nPLAYER_ACC = 0.5\nPLAYER_FRICTION = -0.12\nPLAYER_GRAV = 0.8\nPLAYER_JUMP = 20\n\n# Starting platforms\n# PLATFORM_LIST = [(0, HEIGHT - 60),\n# (WIDTH / 2 - 50, HEIGHT * 3 / 4 - 50),\n# (125, HEIGHT - 350),\n# (350, 200),\n# (400, 300),\n# (500, 40),\n# (900, 600),\n# (500, 100),\n# (800, 100),\n# (600, 40),\n# (450, 600),\n# (150, HEIGHT-60),\n# (800, HEIGHT - 60),\n# ]\nPLATFORM_LIST = [(0, HEIGHT - 60),\n (300, HEIGHT - 60),\n (600, HEIGHT - 60),\n (900, HEIGHT - 60),\n (150, HEIGHT -280),\n (450, HEIGHT - 180),\n (750, HEIGHT - 200),\n (1050, HEIGHT - 240),\n ]\n\n# Starting Enemies\nENEMY_LIST = [(0, HEIGHT - 60, 5)]\n\n# define colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nLIGHTBLUE = (0, 155, 155)\nBGCOLOR = LIGHTBLUE","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"434258809","text":"from odoo import models, fields, api, _ \nfrom odoo.exceptions import UserError, ValidationError\nfrom 
datetime import datetime\nimport logging\n\n\n_logger = logging.getLogger(__name__)\n\nAVAILABLE_STATES = [\n ('draft','New'),\n ('open','Open'),\n ('confirm','Confirm'),\n ('done','Close'),\n ('post','Posted'),\n ('cancel','Cancelled'),\n]\n\nAVAILABLE_DEPOSIT = [\n ('cash','Cash'),\n ('transfer','Transfer'),\n ('cheque','Cheque'),\n]\n\nclass CustomerDeposit(models.Model):\n _name = 'customer.deposit'\n _inherit = ['mail.thread', 'mail.activity.mixin']\n\n @api.one\n def trans_confirm(self):\n #cust_deposit_obj = self.pool.get('cust.deposit')\n #deposit = self.browse(cr, uid, ids, context=context)[0]\n #pricelist_id = deposit.pricelist_id\n #version_id = pricelist_id.version_id[0]\n #self._generate_public_pricelist(cr, uid, version_id.id, context=context)\n self.write({'state': 'open'})\n\n\n @api.one\n def calculate_rest_amount(self):\n #company_id = self._get_company(cr, uid, context=context)\n #if not company_id.deposit_journal_id:\n # raise osv.except_osv(_('Error!'), _('Please define default deposit journal for the company.'))\n Params = self.env['ir.config_parameter'].sudo()\n customer_deposit_account_id = Params.get_param('0043_customer_deposit.customer_deposit_account_id') or False\n\n # a compute method must assign the field on every record\n for deposit in self:\n if not customer_deposit_account_id:\n deposit.rest_amount = 0\n else:\n sql = \"\"\"SELECT sum(a.credit - a.debit) as rest_amount\n FROM account_move_line a\n INNER JOIN account_move b ON a.move_id = b.id\n WHERE b.cust_deposit_id=%s AND a.account_id=%s\"\"\"\n\n self.env.cr.execute(sql, (deposit.id, customer_deposit_account_id))\n deposit.rest_amount = self.env.cr.fetchone()[0] or 0\n \n \n def auto_create_pricelist(self):\n pricelist_obj = self.env['product.pricelist']\n pricelist_item_obj = self.env['product.pricelist.item']\n pricelist_vals = {\n 'partner_id': self.partner_id.id,\n 'name': 'Pricelist - ' + self.partner_id.name,\n 'type': 'sale',\n 'is_deposit': True\n }\n pricelist_id = pricelist_obj.create(pricelist_vals)\n super(CustomerDeposit, self).write({'pricelist_id': pricelist_id.id})\n\n\n name = fields.Char('Name', size=50)\n trans_number = fields.Char('', size=20, readonly=True)\n trans_date = fields.Date('Transaction Date', default=fields.Date.context_today, required=True)\n partner_id = fields.Many2one('res.partner','Partner # ', required=True)\n pricelist_id = fields.Many2one('product.pricelist','Pricelist', readonly=True)\n iface_generated = fields.Boolean('Generated', readonly=True)\n rest_amount = fields.Float(string='Rest Amount', compute=\"calculate_rest_amount\")\n payment_ids = fields.One2many('customer.deposit.payment', 'cust_deposit_id', 'Payments')\n product_ids = fields.One2many('customer.deposit.product', 'cust_deposit_id', 'Products')\n partner_ids = fields.One2many('customer.deposit.partner', 'cust_deposit_id', 'Partners')\n account_move_ids = fields.One2many('account.move', 'cust_deposit_id', 'Moves')\n state = fields.Selection(AVAILABLE_STATES, 'Status', default='draft', readonly=True)\n\n @api.model\n def create(self, vals):\n if vals.get('trans_number', _('New')) == _('New'):\n if 'company_id' in vals:\n vals['trans_number'] = self.env['ir.sequence'].with_context(force_company=vals['company_id']).next_by_code('customer.deposit') or _('New')\n else:\n vals['trans_number'] = self.env['ir.sequence'].next_by_code('customer.deposit') or _('New')\n #trans_number = datetime.now().strftime('%Y%m%d%H%M%S')\n #vals.update({'trans_number': trans_number})\n res = super(CustomerDeposit, self).create(vals)\n 
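# Editor's note: auto_create_pricelist() (called just below) gives every new\n # deposit its own dedicated sale pricelist, flagged is_deposit, for the partner.\n 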
res.auto_create_pricelist()\n return res\n\n\nclass CustomerDepositPayment(models.Model):\n _name = 'customer.deposit.payment'\n\n @api.one\n def trans_reopen(self):\n payment = self\n deposit = payment.cust_deposit_id\n company_id = self.env.user.company_id\n if payment.account_move_id and payment.account_move_id.state == 'posted':\n raise ValidationError(_('Please cancel the Journal Entry before re-opening the payment!'))\n else:\n #Delete Journal Entry (unlink is called on the recordset itself)\n if payment.account_move_id:\n payment.account_move_id.unlink()\n super(CustomerDepositPayment, self).write({'state': 'open','iface_generated': False})\n\n\n def create_account_move(self):\n\n for payment in self:\n deposit = payment.cust_deposit_id\n company_id = self.env.user.company_id \n\n l1 = {\n 'name': deposit.partner_id.name + \" Deposit\",\n 'debit': payment.amount,\n 'credit': 0.0,\n 'account_id': payment.account_id.id,\n 'partner_id': deposit.partner_id.id,\n 'ref': payment.name,\n 'date': payment.trans_date,\n 'company_id': company_id.id,\n }\n\n l2 = {\n 'name': deposit.partner_id.name + \" Deposit\",\n 'debit': 0.0,\n 'credit': payment.amount,\n 'account_id': payment.journal_id.default_credit_account_id.id,\n 'partner_id': deposit.partner_id.id,\n 'ref': payment.name,\n 'date': payment.trans_date,\n 'company_id': company_id.id,\n }\n\n\n move_vals = {\n 'ref': payment.name,\n 'line_ids': [(0, 0, l1), (0, 0, l2)],\n 'journal_id': payment.journal_id.id,\n 'cust_deposit_id': deposit.id,\n 'date': payment.trans_date,\n 'narration': deposit.partner_id.name + \" Deposit\",\n 'company_id': company_id.id,\n }\n\n move = self.env['account.move'].create(move_vals)\n if move:\n super(CustomerDepositPayment, self).write({'account_move_id':move.id,'iface_generated':True, 'state':'done'})\n Params = self.env['ir.config_parameter'].sudo()\n is_auto_posted = Params.get_param('0043_customer_deposit.is_auto_posted')\n if is_auto_posted:\n move.action_post()\n\n return True\n\n\n cust_deposit_id = fields.Many2one('customer.deposit', 'Deposit #', readonly=True)\n name = fields.Char('Trans #', size=50, readonly=True)\n nickname = fields.Char(related='cust_deposit_id.partner_id.name', store=True)\n partner_id = fields.Many2one('res.partner', 'Customer', readonly=True)\n trans_date = fields.Date('Transaction Date', required=True)\n journal_id = fields.Many2one('account.journal', 'Journal', readonly=True)\n account_id = fields.Many2one('account.account', 'Bank or Check Account', required=True)\n method_type = fields.Selection(AVAILABLE_DEPOSIT, 'Method', size=16, required=True)\n cheque_number = fields.Char('Cheque #', size=50)\n cheque_due_date = fields.Date('Cheque Due Date')\n amount = fields.Float('Amount', required=True)\n account_move_id = fields.Many2one('account.move', 'Journal Entry', readonly=True)\n iface_generated = fields.Boolean('Generated', readonly=True)\n state = fields.Selection(AVAILABLE_STATES, 'Status', size=16, default='open', readonly=True)\n\n @api.model \n def create(self, vals):\n if vals.get('name', _('New')) == _('New'):\n if 'company_id' in vals:\n vals['name'] = self.env['ir.sequence'].with_context(force_company=vals['company_id']).next_by_code('customer.deposit.payment') or _('New')\n else:\n vals['name'] = self.env['ir.sequence'].next_by_code('customer.deposit.payment') or _('New')\n \n Params = self.env['ir.config_parameter'].sudo()\n customer_deposit_journal_id = Params.get_param('0043_customer_deposit.customer_deposit_journal_id') or False\n 
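# Editor's note: get_param falls back to False when the module's default\n # deposit journal was never configured; the value is logged just below.\n 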
_logger.info(customer_deposit_journal_id)\n vals['journal_id'] = customer_deposit_journal_id\n\n res = super(CustomerDepositPayment, self).create(vals)\n return res \n\n\nclass CustomerDepositPartner(models.Model):\n _name = 'customer.deposit.partner'\n\n cust_deposit_id = fields.Many2one('customer.deposit','Deposit #', readonly=True)\n partner_id = fields.Many2one('res.partner', \"Partners\", required=True)\n\n _sql_constraints = [ ('unique_deposit_partner', 'unique(cust_deposit_id, partner_id)', 'Partner already exists\n Please select a different partner')\t]\n\n\nclass CustomerDepositProduct(models.Model):\n _name = 'customer.deposit.product'\n\n def add_product(self):\n vals = {}\n vals.update({'pricelist_id': self.cust_deposit_id.pricelist_id.id})\n vals.update({'cust_deposit_product_id': self.id})\n if self.type == 'product':\n vals.update({'applied_on': '1_product'})\n #vals.update({'name': self.product_id.name})\n vals.update({'product_tmpl_id': self.product_id.id})\n if self.type == 'category':\n vals.update({'applied_on': '2_product_category'})\n #vals.update({'name': self.product_category_id.name})\n vals.update({'categ_id': self.product_category_id.id})\n if self.type == 'merk':\n vals.update({'applied_on': '4_merk'})\n #vals.update({'name': self.product_merk_id.name})\n vals.update({'merk_id': self.product_merk_id.id})\n \n vals.update({'base': 'list_price'})\n vals.update({'compute_price': 'formula'})\n \n if self.discount_type == 'amount':\n vals.update({'price_surcharge': self.amount})\n else:\n if -100 <= self.percentage <= 100:\n vals.update({'price_discount': self.percentage / 100})\n else:\n raise ValidationError(_('Percentage must be between -100 and 100!'))\n\n res = self.env['product.pricelist.item'].create(vals)\n\n def update_product(self):\n # search() expects a domain list of (field, operator, value) triples\n item = self.env['product.pricelist.item'].search([('cust_deposit_product_id', '=', self.id)])\n if not item:\n raise ValidationError(_('Product Pricelist Item not found'))\n\n vals = {}\n if self.type == 'product':\n vals.update({'name': self.product_id.name})\n vals.update({'product_id': self.product_id.id})\n if self.type == 'category':\n vals.update({'name': self.product_category_id.name})\n vals.update({'categ_id': self.product_category_id.id})\n if self.type == 'merk':\n vals.update({'name': self.product_merk_id.name})\n vals.update({'product_merk_id': self.product_merk_id.id})\n \n vals.update({'base': 'list_price'})\n\n if self.discount_type == 'amount':\n vals.update({'price_surcharge': self.amount})\n else:\n if -100 <= self.percentage <= 100:\n vals.update({'price_discount': self.percentage / 100})\n else:\n raise ValidationError(_('Percentage must be between -100 and 100!'))\n\n # write the collected values onto the pricelist item, not onto self\n item.write(vals)\n\n\n @api.one\n @api.depends('product_id', 'product_category_id', 'product_merk_id', 'type')\n def _get_product_deposit_name(self):\n if self.product_category_id:\n self.name = _(\"Category: %s\") % (self.product_category_id.name)\n elif self.product_id:\n self.name = self.product_id.name\n elif self.product_merk_id:\n self.name = _(\"Merk: %s\") % (self.product_merk_id.name)\n else:\n self.name = ''\n\n @api.onchange('type')\n def _onchange_applied_on(self):\n if self.type != 'product':\n self.product_id = False\n if self.type != 'category':\n self.product_category_id = False\n if self.type != 'merk':\n self.product_merk_id = False\n\n\n cust_deposit_id = fields.Many2one('customer.deposit','Deposit #', readonly=True)\n name = fields.Char('Name', 
compute='_get_product_deposit_name', readonly=True)\n type = fields.Selection([('product','By Product'),('category','By Category'),('merk','By Merk')], 'Type', default='product', required=True)\n product_id = fields.Many2one('product.template', \"Products\")\n product_category_id = fields.Many2one('product.category', \"Product Category\")\n product_merk_id = fields.Many2one('product.merk','Product Merk')\n discount_type = fields.Selection([('amount','Amount'),('percentage','Percentage')], 'Discount Type', default='amount')\n percentage = fields.Float('Percentage')\n amount = fields.Float('Amount')\n\n @api.model\n def create(self, vals): \n res = super(CustomerDepositProduct, self).create(vals)\n res.add_product()\n return res\n\n @api.multi\n def write(self, vals): \n # super().write returns a boolean, so call update_product on self\n res = super(CustomerDepositProduct, self).write(vals)\n self.update_product()\n return res\n\n\n @api.multi \n def unlink(self):\n item = self.env['product.pricelist.item'].search([('cust_deposit_product_id', '=', self.id)])\n if item:\n item.unlink()\n super(CustomerDepositProduct, self).unlink()\n","sub_path":"modules/8.0/0043_customer_deposit/models/customer_deposit.py","file_name":"customer_deposit.py","file_ext":"py","file_size_in_byte":13598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"266455637","text":"\n\nfrom xai.brain.wordbase.verbs._overturn import _OVERTURN\n\n# class header\nclass _OVERTURNS(_OVERTURN, ):\n\tdef __init__(self,): \n\t\t_OVERTURN.__init__(self)\n\t\tself.name = \"OVERTURNS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"overturn\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_overturns.py","file_name":"_overturns.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"322706971","text":"# %tensorflow_version 2.x\n\nimport tensorflow as tf\nprint(tf.test.gpu_device_name())\nprint(tf.test.is_gpu_available())\n\n#### Import relevant libraries###\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n#from scipy.io import loadmat\nimport pandas as pd\nimport random\nimport os\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nimport tensorflow.keras.preprocessing.text as kpt\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Sequential, Model, load_model\nfrom tensorflow.keras.layers import Layer, Input, Dense, Embedding, LSTM, Flatten, Dropout, SpatialDropout1D, Bidirectional, GRU, LeakyReLU, TimeDistributed, Concatenate, Reshape, Conv1D,MaxPooling1D,Conv2D,MaxPooling2D, AveragePooling1D\n#from tensorflow.compat.v1.keras.layers import CuDNNLSTM\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.utils import to_categorical, plot_model\nimport re\nimport math\n\n##### initialize a seed for repeatability\n\nos.environ['PYTHONHASHSEED'] = '0'\nrandom.seed(42)\nnp.random.seed(42)\ntf.random.set_random_seed(42)\n\n#### Set paths to Training, Dev and Test Data###\ndef infer_model(iii,jjj,X_cont_test,Y_cont_test, X_shuffled_test,Y_shuffled_test):\n X_tr_pd_path = \"../../Dataset and Predictions/Data_%s_Frames/X_tr_pd.csv\" %(str(iii+1))\n Y_tr_pd_path = \"../../Dataset and Predictions/Data_%s_Frames/Y_tr_pd.csv\" %(str(iii+1))\n\n X_test_pd_path = \"../../Dataset and Predictions/Data_%s_Frames/X_test_pd.csv\" %(str(iii+1))\n Y_test_pd_path = 
\"../../Dataset and Predictions/Data_%s_Frames/Y_test_pd.csv\" %(str(iii+1))\n\n ####Load the pd Dataframes directly###\n\n X_tr_df = pd.read_csv(X_tr_pd_path)\n Y_tr_df = pd.read_csv(Y_tr_pd_path)\n\n # X_dev_df = pd.read_csv(X_dev_pd_path)\n #Y_dev_df = pd.read_csv(Y_dev_pd_path)\n\n X_test_df = pd.read_csv(X_test_pd_path)\n Y_test_df = pd.read_csv(Y_test_pd_path)\n\n print(X_tr_df.shape)\n print(Y_tr_df.shape)\n\n print(X_test_df.shape)\n print(Y_test_df.shape)\n\n ####Preprocess Voxels by creating a voxel vocabulary\n\n ## Add start and end token to the output voxels\n ###Similar to start and end of sentence\n ###\n\n Y_tr_df['Voxels'] = Y_tr_df['Voxels'].apply(lambda x : 'sostok '+ x + ' eostok')\n Y_test_df['Voxels'] = Y_test_df['Voxels'].apply(lambda x : 'sostok '+ x + ' eostok')\n\n ##### Concatenate Training, Dev and Test Data to obtain all the possible occuring voxels###\n\n combined_data_X = pd.concat([X_tr_df,X_test_df])\n combined_data_Y = pd.concat([Y_tr_df,Y_test_df])\n\n print(combined_data_X.shape)\n print(combined_data_Y.shape)\n\n ##### Tokenize the vocab on the combined input data###\n\n x_tokenizer = Tokenizer(num_words=45000,lower=True,split=' ')\n x_tokenizer.fit_on_texts(combined_data_X['Voxels'].values) #We are only interested in the 'Voxels' column's vocabulary\n x_dictionary = x_tokenizer.word_index #A dictionary of Voxel-index pairs\n\n vocab_size_x = len(x_dictionary)\n print(vocab_size_x)\n\n X_train_tok = x_tokenizer.texts_to_sequences(X_tr_df['Voxels']) #For the training data\n X_test_tok = x_tokenizer.texts_to_sequences(X_test_df['Voxels']) #For the test data\n\n np.shape(X_train_tok)\n\n ##### Tokenize the vocab on the combined input data###\n\n y_tokenizer = Tokenizer(num_words=45000,lower=True,split=' ')\n y_tokenizer.fit_on_texts(combined_data_Y['Voxels'].values) #We are only interested in the 'Voxels' column's vocabulary\n y_dictionary = y_tokenizer.word_index #A dictionary of Voxel-index pairs\n\n vocab_size_y = len(y_dictionary)+1\n print(vocab_size_y)\n\n Y_train_tok = y_tokenizer.texts_to_sequences(Y_tr_df['Voxels']) #For the training data\n #Y_dev_tok = y_tokenizer.texts_to_sequences(Y_dev_df['Voxels']) #For the development data\n Y_test_tok = y_tokenizer.texts_to_sequences(Y_test_df['Voxels']) #For the test data\n\n np.shape(Y_train_tok)\n\n ####Reorganize the input data to a sliding stack of multiple frames\n\n ##Define a function to take in an array and output a sliding stack of multiple frames\n ###\n X_tr = np.array(X_train_tok)-1\n Y_tr = np.array(Y_train_tok)\n\n X_test = np.array(X_test_tok)-1\n Y_test = np.array(Y_test_tok)\n\n print(np.shape(X_tr))\n print(np.shape(Y_tr))\n\n print(np.shape(X_test))\n print(np.shape(Y_test))\n\n X_tr = np.reshape(X_tr,(np.shape(X_tr)[0],int((np.shape(X_tr)[1])/90),90))\n X_test = np.reshape(X_test,(np.shape(X_test)[0],int((np.shape(X_test)[1])/90),90))\n\n print(np.shape(X_tr))\n print(np.shape(Y_tr))\n\n \n print(np.shape(X_test))\n print(np.shape(Y_test))\n\n\n\n\n latent_dim = 512\n ### Attention Layer ####\n import tensorflow as tf\n import os\n from tensorflow.python.keras.layers import Layer\n from tensorflow.python.keras import backend as K\n\n\n class AttentionLayer(Layer):\n \"\"\"\n This class implements Bahdanau attention (https://arxiv.org/pdf/1409.0473.pdf).\n There are three sets of weights introduced W_a, U_a, and V_a\n \"\"\"\n\n def __init__(self, **kwargs):\n super(AttentionLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n assert isinstance(input_shape, list)\n # 
Create a trainable weight variable for this layer.\n\n self.W_a = self.add_weight(name='W_a',\n shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),\n initializer='uniform',\n trainable=True)\n self.U_a = self.add_weight(name='U_a',\n shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),\n initializer='uniform',\n trainable=True)\n self.V_a = self.add_weight(name='V_a',\n shape=tf.TensorShape((input_shape[0][2], 1)),\n initializer='uniform',\n trainable=True)\n\n super(AttentionLayer, self).build(input_shape) # Be sure to call this at the end\n\n def call(self, inputs, verbose=False):\n \"\"\"\n inputs: [encoder_output_sequence, decoder_output_sequence]\n \"\"\"\n assert type(inputs) == list\n encoder_out_seq, decoder_out_seq = inputs\n if verbose:\n print('encoder_out_seq>', encoder_out_seq.shape)\n print('decoder_out_seq>', decoder_out_seq.shape)\n\n def energy_step(inputs, states):\n \"\"\" Step function for computing energy for a single decoder state \"\"\"\n\n assert_msg = \"States must be a list. However states {} is of type {}\".format(states, type(states))\n assert isinstance(states, list) or isinstance(states, tuple), assert_msg\n\n \"\"\" Some parameters required for shaping tensors\"\"\"\n en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]\n de_hidden = inputs.shape[-1]\n\n \"\"\" Computing S.Wa where S=[s0, s1, ..., si]\"\"\"\n # <= batch_size*en_seq_len, latent_dim\n reshaped_enc_outputs = K.reshape(encoder_out_seq, (-1, en_hidden))\n # <= batch_size*en_seq_len, latent_dim\n W_a_dot_s = K.reshape(K.dot(reshaped_enc_outputs, self.W_a), (-1, en_seq_len, en_hidden))\n if verbose:\n print('wa.s>',W_a_dot_s.shape)\n\n \"\"\" Computing hj.Ua \"\"\"\n U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1) # <= batch_size, 1, latent_dim\n if verbose:\n print('Ua.h>',U_a_dot_h.shape)\n\n \"\"\" tanh(S.Wa + hj.Ua) \"\"\"\n # <= batch_size*en_seq_len, latent_dim\n reshaped_Ws_plus_Uh = K.tanh(K.reshape(W_a_dot_s + U_a_dot_h, (-1, en_hidden)))\n if verbose:\n print('Ws+Uh>', reshaped_Ws_plus_Uh.shape)\n\n \"\"\" softmax(va.tanh(S.Wa + hj.Ua)) \"\"\"\n # <= batch_size, en_seq_len\n e_i = K.reshape(K.dot(reshaped_Ws_plus_Uh, self.V_a), (-1, en_seq_len))\n # <= batch_size, en_seq_len\n e_i = K.softmax(e_i)\n\n if verbose:\n print('ei>', e_i.shape)\n\n return e_i, [e_i]\n\n def context_step(inputs, states):\n \"\"\" Step function for computing ci using ei \"\"\"\n # <= batch_size, hidden_size\n c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)\n if verbose:\n print('ci>', c_i.shape)\n return c_i, [c_i]\n\n def create_inital_state(inputs, hidden_size):\n # We are not using initial states, but need to pass something to K.rnn funciton\n fake_state = K.zeros_like(inputs) # <= (batch_size, enc_seq_len, latent_dim\n fake_state = K.sum(fake_state, axis=[1, 2]) # <= (batch_size)\n fake_state = K.expand_dims(fake_state) # <= (batch_size, 1)\n fake_state = K.tile(fake_state, [1, hidden_size]) # <= (batch_size, latent_dim\n return fake_state\n\n fake_state_c = create_inital_state(encoder_out_seq, encoder_out_seq.shape[-1])\n fake_state_e = create_inital_state(encoder_out_seq, encoder_out_seq.shape[1]) # <= (batch_size, enc_seq_len, latent_dim\n\n \"\"\" Computing energy outputs \"\"\"\n # e_outputs => (batch_size, de_seq_len, en_seq_len)\n last_out, e_outputs, _ = K.rnn(\n energy_step, decoder_out_seq, [fake_state_e],\n )\n\n \"\"\" Computing context vectors \"\"\"\n last_out, c_outputs, _ = K.rnn(\n context_step, e_outputs, [fake_state_c],\n )\n\n 
return c_outputs, e_outputs\n\n def compute_output_shape(self, input_shape):\n \"\"\" Outputs produced by the layer \"\"\"\n return [\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))\n ]\n \n from tensorflow.keras.models import load_model\n # Assuming your model includes instance of an \"AttentionLayer\" class\n model_path = \"../../Dataset and Predictions/Data_%s_Frames/Models/mmpose-nlp-%s-frames_iter_%s.h5\" %(str(iii+1),str(iii+1),str(jjj+1))\n model = load_model(model_path, custom_objects={'AttentionLayer': AttentionLayer})\n #model.summary() \n\n reverse_target_word_index=y_tokenizer.index_word\n reverse_source_word_index=x_tokenizer.index_word\n target_word_index=y_tokenizer.word_index \n\n # Encoder\n encoder_inputs = model.input[0]\n\n #Embedding layer\n enc_emb = model.layers[1]\n enc_emb = enc_emb(encoder_inputs)\n\n #Reshape layer\n\n reshape_emb = model.layers[2]\n reshape_emb = reshape_emb(enc_emb)\n\n #encoder gru 1\n encoder_gru1 = model.layers[6]\n encoder_output1, _ = encoder_gru1(reshape_emb)\n\n #encoder gru 2\n encoder_gru2= model.layers[8]\n encoder_outputs, f_state_h= encoder_gru2(encoder_output1)\n\n encoder_model = Model(inputs=encoder_inputs,outputs=[encoder_outputs, f_state_h])\n\n ##encoder_model.summary() \n\n # Set up the decoder, using `encoder_states` as initial state.\n decoder_inputs = model.input[1]\n decoder_state_input = Input(shape=(latent_dim,),name=\"dec_input_h_1\")\n decoder_hidden_state_input = Input(shape=(np.shape(X_tr)[1],latent_dim,))\n\n #embedding layer\n dec_emb_layer = model.layers[5]\n dec_emb = dec_emb_layer(decoder_inputs)\n\n #decoder gru 1\n decoder_gru1 = model.layers[9]\n decoder_outputs, dec_state = decoder_gru1(dec_emb,initial_state=decoder_state_input)\n\n #decoder gru 2\n #decoder_gru2 = model.layers[9]\n #decoder_outputs, _ = decoder_gru2(decoder_outputs1)\n\n ###### Add in Attention Layer###\n\n # Attention layer\n attn_layer = model.layers[10]\n attn_out, attn_states = attn_layer([decoder_hidden_state_input, decoder_outputs])\n\n ###### Concatenate Attn. 
Output and Decoder Output to the Output Layer###\n\n # Concat attention input and decoder LSTM output\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_outputs, attn_out])\n\n #attn_dropout = (TimeDistributed(Dropout(rate=0.4)))(decoder_concat_input)\n\n decoder_dense = model.layers[13]\n decoder_outputs_final = decoder_dense(decoder_concat_input)\n\n decoder_model = Model([decoder_inputs]+[decoder_state_input,decoder_hidden_state_input], [decoder_outputs_final]+[dec_state])\n ##decoder_model.summary() \n #print('Loaded')\n print('Loaded')\n def decode_sequence(input_seq):\n max_summary_len = 27\n # Encode the input as state vectors.\n e_out, e_h = encoder_model.predict(input_seq)\n \n # Generate empty target sequence of length 1.\n target_seq = np.zeros((1,1))\n \n # Populate the first word of target sequence with the start word.\n target_seq[0, 0] = target_word_index['sostok']\n\n stop_condition = False\n decoded_sentence = ''\n while not stop_condition:\n \n output_tokens, h = decoder_model.predict([target_seq] + [e_h, e_out])\n\n # Sample a token\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n sampled_token = reverse_target_word_index[sampled_token_index]\n \n if(sampled_token!='eostok'):\n decoded_sentence += ' '+sampled_token\n\n # Exit condition: either hit max length or find stop word.\n if (sampled_token == 'eostok' or len(decoded_sentence.split()) >= (max_summary_len-1)):\n stop_condition = True\n\n # Update the target sequence (of length 1).\n target_seq = np.zeros((1,1))\n target_seq[0, 0] = sampled_token_index\n\n # Update internal states\n e_h = h\n\n return decoded_sentence\n\n def seq2summary(input_seq):\n newString=''\n for i in input_seq:\n if((i!=0 and i!=target_word_index['sostok']) and i!=target_word_index['eostok']):\n newString=newString+reverse_target_word_index[i]+' '\n return newString\n\n def seq2text(input_seq):\n newString=''\n for i in input_seq:\n if(i!=0):\n newString=newString+reverse_source_word_index[i]+' '\n return newString\n \n def str2int(str_seq):\n int_seq = [int(kk) for kk in str_seq]\n return int_seq\n \n GT=[]\n pred=[]\n max_text_len=25\n for i in range(np.shape(X_cont_test)[0]):\n #if i!=770:\n prediction = str2int(decode_sequence(X_cont_test[i].reshape(1,np.shape(X_tr)[1],np.shape(X_tr)[2])).split())\n if len(prediction)==25:\n GT.append(str2int(seq2summary(Y_cont_test[i]).split()))\n pred.append(prediction)\n else:\n print(i)\n\n GT = np.array(GT)\n pred = np.array(pred)\n\n voxel_dict = np.load('../../Dataset and Predictions/VD.npy')\n GroundTruth = np.zeros((np.shape(GT)[0],np.shape(GT)[1],3))\n Prediction = np.zeros((np.shape(GT)[0],np.shape(GT)[1],3))\n\n for i in range(np.shape(GT)[0]):\n for j in range(np.shape(GT)[1]):\n GroundTruth[i,j,:] = voxel_dict[int(GT[i,j]+1)]\n Prediction[i,j,:] = voxel_dict[int(pred[i,j]+1)]\n \n gtpath = \"../../Dataset and Predictions/Data_%s_Frames/Ground Truth/GT_cont_test_%s.npy\" %(str(iii+1),str(jjj+1))\n predpath = \"../../Dataset and Predictions/Data_%s_Frames/Predictions/Pred_cont_test_%s.npy\" %(str(iii+1),str(jjj+1))\n\n np.save(gtpath,GroundTruth)\n np.save(predpath,Prediction)\n\n mean_3d_error = np.mean(abs(GroundTruth-Prediction),axis=0)\n\n print(mean_3d_error)\n \n error_path = \"../../Dataset and Predictions/Data_%s_Frames/MAE/MAE_cont_test_%s.npy\" %(str(iii+1),str(jjj+1))\n np.save(error_path,mean_3d_error)\n\n GT=[]\n pred=[]\n max_text_len=25\n for i in range(np.shape(X_shuffled_test)[0]):\n #if i!=770:\n prediction = 
str2int(decode_sequence(X_shuffled_test[i].reshape(1,np.shape(X_tr)[1],np.shape(X_tr)[2])).split())\n        if len(prediction)==25:\n            GT.append(str2int(seq2summary(Y_shuffled_test[i]).split()))\n            pred.append(prediction)\n        else:\n            print(i)\n\n    GT = np.array(GT)\n    pred = np.array(pred)\n\n    voxel_dict = np.load('../../Dataset and Predictions/VD.npy')\n    GroundTruth = np.zeros((np.shape(GT)[0],np.shape(GT)[1],3))\n    Prediction = np.zeros((np.shape(GT)[0],np.shape(GT)[1],3))\n\n    for i in range(np.shape(GT)[0]):\n        for j in range(np.shape(GT)[1]):\n            GroundTruth[i,j,:] = voxel_dict[int(GT[i,j]+1)]\n            Prediction[i,j,:] = voxel_dict[int(pred[i,j]+1)]\n    \n    gtpath = \"../../Dataset and Predictions/Data_%s_Frames/Ground Truth/GT_shuffled_test_%s.npy\" %(str(iii+1),str(jjj+1))\n    predpath = \"../../Dataset and Predictions/Data_%s_Frames/Predictions/Pred_shuffled_test_%s.npy\" %(str(iii+1),str(jjj+1))\n    np.save(gtpath,GroundTruth)\n    np.save(predpath,Prediction)\n\n    mean_3d_error = np.mean(abs(GroundTruth-Prediction),axis=0)\n\n    print(mean_3d_error)\n    \n    error_path = \"../../Dataset and Predictions/Data_%s_Frames/MAE/MAE_shuffled_test_%s.npy\" %(str(iii+1),str(jjj+1))\n    np.save(error_path,mean_3d_error)\n\nfor iii in range(0,10):\n    for jjj in range(0,5):\n\n        X_shuf_test_path = \"../../Dataset and Predictions/Data_%s_Frames/Datasets/X_shuf_test_iter_%s.npy\" %(str(iii+1),str(jjj+1))\n        Y_shuf_test_path = \"../../Dataset and Predictions/Data_%s_Frames/Datasets/Y_shuf_test_iter_%s.npy\" %(str(iii+1),str(jjj+1))\n        X_cont_test_path = \"../../Dataset and Predictions/Data_%s_Frames/Datasets/X_cont_test_iter_%s.npy\" %(str(iii+1),str(jjj+1))\n        Y_cont_test_path = \"../../Dataset and Predictions/Data_%s_Frames/Datasets/Y_cont_test_iter_%s.npy\" %(str(iii+1),str(jjj+1))\n    \n\n        X_cont_test = np.load(X_cont_test_path)\n        Y_cont_test = np.load(Y_cont_test_path)\n\n        X_shuffled_test = np.load(X_shuf_test_path)\n        Y_shuffled_test = np.load(Y_shuf_test_path)\n\n\n        infer_model(iii,jjj,X_cont_test,Y_cont_test, X_shuffled_test,Y_shuffled_test)\n","sub_path":"Source Codes/Python Files/master_infer_mmpose_nlp_gru.py","file_name":"master_infer_mmpose_nlp_gru.py","file_ext":"py","file_size_in_byte":18482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}{"seq_id":"29907941","text":"from flask_login import current_user\n\nfrom app.datasource import register, api_assert\nfrom app.db import DBSession\nfrom app.auth.permission import (\n    verify_environment_permission,\n    get_data_table_environment_ids,\n    get_data_doc_environment_ids,\n)\nfrom logic import board as logic\nfrom logic.board_permission import assert_can_read, assert_can_edit\nfrom models.board import Board\n\n\n@register(\n    \"/board/\", methods=[\"GET\"],\n)\ndef get_my_boards(environment_id, filter_str=None):\n    with DBSession() as session:\n        return logic.get_user_boards(\n            current_user.id,\n            environment_id=environment_id,\n            filter_str=filter_str,\n            session=session,\n        )\n\n\n@register(\n    \"/board/<int:board_id>/\", methods=[\"GET\"],\n)\ndef get_board_by_id(board_id):\n    with DBSession() as session:\n        assert_can_read(board_id, session=session)\n        board = Board.get(id=board_id, session=session)\n        api_assert(board is not None, \"Invalid board id\", 404)\n        verify_environment_permission([board.environment_id])\n        return board.to_dict(extra_fields=[\"docs\", \"tables\", \"items\"])\n\n\n@register(\n    \"/board/\", methods=[\"POST\"],\n)\ndef create_board(\n    name, environment_id, owner_uid, description=None, public=None, favorite=False,\n):\n    with DBSession() as session:\n        verify_environment_permission([environment_id])\n        return logic.create_board(\n            name,\n            environment_id,\n            owner_uid,\n            description,\n            public,\n            favorite,\n            session=session,\n        ).to_dict()\n\n\n@register(\n    \"/board/<int:board_id>/\", methods=[\"PUT\"],\n)\ndef update_board(board_id, **fields):\n    with DBSession() as session:\n        assert_can_edit(board_id, session=session)\n        board = Board.get(id=board_id, session=session)\n\n        board = logic.update_board(id=board_id, **fields, session=session)\n        return board.to_dict(extra_fields=[\"docs\", \"tables\", \"items\"])\n\n\n@register(\n    \"/board/<int:board_id>/\", methods=[\"DELETE\"],\n)\ndef delete_board(board_id, **fields):\n    with DBSession() as session:\n        assert_can_edit(board_id, session=session)\n        board = Board.get(id=board_id, session=session)\n        api_assert(not board.board_type == \"favorite\", \"Cannot delete favorite\")\n\n        Board.delete(board.id, session=session)\n\n\n@register(\"/board_item/<item_type>/<int:item_id>/board/\", methods=[\"GET\"])\ndef get_board_ids_from_board_item(item_type: str, item_id: int, environment_id: int):\n    \"\"\"Given a potential item, find all possible board ids it can\n       be related to\n\n    Arguments:\n        item_type {[str]} -- [data_doc or table]\n        item_id {[int]} -- [Doc id or table id]\n        environment_id {[int]} - [id of board environment]\n    \"\"\"\n    return logic.get_board_ids_from_board_item(item_type, item_id, environment_id)\n\n\n@register(\n    \"/board/<int:board_id>/<item_type>/<int:item_id>/\", methods=[\"POST\"],\n)\ndef add_board_item(board_id, item_type, item_id):\n    api_assert(item_type == \"data_doc\" or item_type == \"table\", \"Invalid item type\")\n\n    with DBSession() as session:\n        assert_can_edit(board_id, session=session)\n\n        board = Board.get(id=board_id, session=session)\n        # You can only add item in the same environment as the board\n        item_env_ids = []\n        if item_type == \"data_doc\":\n            item_env_ids = get_data_doc_environment_ids(item_id, session=session)\n        else:\n            item_env_ids = get_data_table_environment_ids(item_id, session=session)\n\n        api_assert(\n            board.environment_id in item_env_ids,\n            \"Board item must be in the same environment as the board\",\n        )\n        api_assert(\n            logic.get_item_from_board(board_id, item_id, item_type, session=session)\n            is None,\n            \"Item already exists\",\n        )\n\n        return logic.add_item_to_board(board_id, item_id, item_type, session=session)\n\n\n@register(\n    \"/board/<int:board_id>/move/<int:from_index>/<int:to_index>/\", methods=[\"POST\"],\n)\ndef move_board_item(board_id, from_index, to_index):\n    if from_index != to_index:\n        with DBSession() as session:\n            assert_can_edit(board_id, session=session)\n            logic.move_item_order(board_id, from_index, to_index, session=session)\n\n\n@register(\n    \"/board/<int:board_id>/<item_type>/<int:item_id>/\", methods=[\"DELETE\"],\n)\ndef delete_board_item(board_id, item_type, item_id):\n    api_assert(item_type == \"data_doc\" or item_type == \"table\", \"Invalid item type\")\n    with DBSession() as session:\n        assert_can_edit(board_id, session=session)\n\n        board = Board.get(id=board_id, session=session)\n        logic.remove_item_from_board(board.id, item_id, item_type, session=session)\n\n\n@register(\"/board/favorite/\", methods=[\"POST\"])\ndef get_or_create_favorite_board(environment_id):\n    verify_environment_permission([environment_id])\n    with DBSession() as session:\n        board = logic.get_or_create_user_favorite_board(\n            current_user.id, environment_id, session=session\n        )\n        return board.to_dict(extra_fields=[\"docs\", \"tables\"])\n","sub_path":"querybook/server/datasources/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}{"seq_id":"431634925","text":"# -*- coding: utf-8 -*-\n\n#\n# Licensed Materials - Property of esse.io\n#\n# (C) Copyright esse.io. 2015 All Rights Reserved\n#\n# Author: frank (frank@esse.io)\n#\n#\n\nfrom ws.resources.session import session_check\nfrom ws.request.base import BaseRequest\nfrom zencomm.log import log as logging\nfrom zencomm.api.error import ErrorInfo\nfrom zencomm.api import error_code as ErrCodes\nfrom zencomm.api.constants import APIREQ_ATTRIBUTE\nfrom zencomm.api import error_msg as ErrMsg\nfrom zencomm.common import get_resource_from_action\n\nLOG = logging.getLogger(__name__)\n\nclass SessionRequest(BaseRequest):\n\n    def __init__(self, apireq):\n        \"\"\" request with session information\n            @param apireq: request dict with 'headers' (method, path,\n                           is_secure) and 'payload' (action, parameters)\n        \"\"\"\n        self.method = apireq[\"headers\"][\"method\"]\n        self.path = apireq[\"headers\"][\"path\"]\n        self.is_secure = apireq[\"headers\"][\"is_secure\"]\n        self.action = apireq[\"payload\"][\"action\"]\n        self.payload = apireq[\"payload\"]\n        self.sender = {}\n\n    def __str__(self):\n        return (('method:(%s) path(%s) params(%s)') % (self.method, \n                                                       self.path, \n                                                       self.payload))\n    \n    def _check_request_parameters(self):\n        '''\n            validate required parameters for session request\n        '''\n        resource_name = get_resource_from_action(self.action)\n        # required_params = [resource_name, 'sid']\n        required_params = [resource_name]\n        # 1) set error for request if required parameters are not provided\n        for rp in required_params:\n            if rp not in self.payload:\n                LOG.error(\"Invalid parameters for action %s, <%s> is needed\"\n                          % (self.action, rp))\n                error = ErrorInfo(ErrCodes.INVALID_REQUEST_FORMAT,\n                                  ErrMsg.ERRMSG_MISSED_REQUEST_PARAM,\n                                  rp)\n                self.set_error(error)\n                return False\n\n        # 2) check required resource parameters for action\n        required_params = APIREQ_ATTRIBUTE[self.action]['required_params']\n        for rp in required_params:\n            if rp not in self.payload[resource_name]:\n                LOG.error(\"Invalid parameters for action %s, <%s> is needed\"\n                          % (self.action, rp))\n                error = ErrorInfo(ErrCodes.INVALID_REQUEST_FORMAT,\n                                  ErrMsg.ERRMSG_MISSED_REQUEST_PARAM,\n                                  rp)\n                self.set_error(error)\n                return False\n        return True\n\n    def _check_request_signature(self):\n        ''' check signature of request\n            @return: user id if succeeded and None if failed.\n        '''\n        # get sender's public api key from params\n        \n        # get sender's private api key from db\n        \n        # check whether checksum for request is correct\n        \n        # if correct, return sender's user id else None\n\n        return None\n    \n    def _check_request_expired(self):\n        '''\n            check whether request has expired\n            @return True if request is not expired\n        '''\n        return True\n\n    def _check_request_session(self):\n        ''' check session information of this request\n            @return: user_id if session key is valid,\n                     else return None\n        ''' \n        # get session key from self.payload['sid']\n        skey = self.payload[\"sid\"]\n        user_id = session_check(skey)\n        if user_id is None:\n            LOG.error(\"unregistered session key [%s]\" % (skey))\n            error = ErrorInfo(ErrCodes.USER_SESSION_EXPIRED)\n            self.set_error(error)\n            return None\n\n        return user_id\n\n\n    def validate(self):\n        ''' validate sender's request\n        '''\n\n        if not self._check_request_parameters():\n            return False\n\n        # NOTE: this early return short-circuits validation; the expiry,\n        # signature and session checks below are currently unreachable stubs\n        return True\n\n        if not self._check_request_expired():\n            
return False\n\n if not self._check_request_signature():\n return False\n\n user_id = self._check_request_session()\n if user_id is None:\n return False\n\n user = self._check_sender_access(user_id)\n if user is None:\n return False\n\n if not self._check_api_abuse(user_id):\n return False\n\n self.sender = self._get_sender(user)\n\n # management operation must be done over secure channel\n # else lower user's privilege\n if not self.is_secure:\n self.sender[\"privilege\"] = 'normal'\n self.sender[\"role\"] = 'normal'\n return True\n \n def build_request(self):\n ''' build request'''\n pass\n","sub_path":"ws/request/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"469816045","text":"from vnpy.app.cta_strategy.backtesting import BacktestingEngine, OptimizationSetting\nfrom vnpy.app.cta_strategy.strategies.tick_strategy import (\n TickStrategy,\n)\nfrom vnpy.app.cta_strategy.base import BacktestingMode\nfrom datetime import datetime\n\nengine = BacktestingEngine()\nengine.set_parameters(\n mode=BacktestingMode.TICK,\n vt_symbol=\"c2005.DCE\",\n interval=\"1m\",\n start=datetime(2019, 1, 1),\n end=datetime(2020, 3, 20),\n rate=0.3/10000,\n slippage=0.2,\n size=300,\n pricetick=0.2,\n capital=1_000_000,\n collection_name =\"c2005\"\n \n)\nengine.add_strategy(TickStrategy, {})\n\nengine.load_data()\nengine.run_backtesting()\ndf = engine.calculate_result()\nengine.calculate_statistics()\nengine.show_chart()","sub_path":"examples/cta_backtesting/demo_backtesting.py","file_name":"demo_backtesting.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"577349002","text":"\nwd = '/Users/songweizhi/Desktop/new'\n\ntransfer_file = open('%s/donor2recip.txt' % wd)\nnegatives = open('%s/HGT_candidates_ng_with_direction.txt' % wd)\n#predicted = open('%s/HGT_candidates_100.txt' % wd)\npredicted = open('%s/HGT_candidates_100_with_direction.txt' % wd)\n\n\ndonor_list = []\nfor each in transfer_file:\n each_split = each.strip().split('\\t')\n donor = each_split[0]\n if donor != 'donor_gene':\n donor_list.append(donor)\n\nprint('donor_list(%s):' % len(donor_list))\nprint('\\t'.join(donor_list))\n\n\nneg_A_to_B_list = []\nneg_B_to_A_list = []\nfor each_neg in negatives:\n #print(each_neg)\n each_neg_split = each_neg.strip().split('\\t')\n if each_neg.startswith('A'):\n neg_B_to_A_list.append(each_neg_split[0])\n if each_neg.startswith('B'):\n neg_A_to_B_list.append(each_neg_split[1])\n\n# neg_A_to_B_list_new = []\n# for each in neg_A_to_B_list:\n# if each not in set(neg_A_to_B_list).intersection(neg_B_to_A_list):\n# neg_A_to_B_list_new.append(each)\n#\n# neg_B_to_A_list_new = []\n# for each in neg_B_to_A_list:\n# if each not in set(neg_A_to_B_list).intersection(neg_B_to_A_list):\n# neg_B_to_A_list_new.append(each)\n\n\nprint('\\nneg_A_to_B_list(%s):' % len(neg_A_to_B_list))\nprint('\\t'.join(neg_A_to_B_list))\n\n# print('\\nneg_A_to_B_list_new(%s):' % len(neg_A_to_B_list_new))\n# print('\\t'.join(neg_A_to_B_list_new))\n\nprint('\\nneg_B_to_A_list(%s):' % len(neg_B_to_A_list))\nprint('\\t'.join(neg_B_to_A_list))\n\n# print('\\nneg_B_to_A_list_new(%s):' % len(neg_B_to_A_list_new))\n# print('\\t'.join(neg_B_to_A_list_new))\n\nprint('\\nintersection negative group(%s)' % 
len(set(neg_A_to_B_list).intersection(neg_B_to_A_list)))\nprint('\\t'.join(set(neg_A_to_B_list).intersection(neg_B_to_A_list)))\n\npredicted_A_to_B_list = []\npredicted_B_to_A_list = []\nfor each_hgt in predicted:\n each_hgt_split = each_hgt.strip().split('\\t')\n if each_hgt.startswith('A'):\n predicted_B_to_A_list.append(each_hgt_split[0])\n if each_hgt.startswith('B'):\n predicted_A_to_B_list.append(each_hgt_split[1])\nprint('\\npredicted_A_to_B_list(%s):' % len(predicted_A_to_B_list))\nprint('\\t'.join(predicted_A_to_B_list))\nprint('\\npredicted_B_to_A_list(%s):' % len(predicted_B_to_A_list))\nprint('\\t'.join(predicted_B_to_A_list))\nprint('\\nintersection predicted group(%s)' % len(set(predicted_A_to_B_list).intersection(predicted_B_to_A_list)))\nprint('\\t'.join(set(predicted_A_to_B_list).intersection(predicted_B_to_A_list)))\n\nvalidated_A_to_B_list = []\nfor each in predicted_A_to_B_list:\n if each in donor_list:\n validated_A_to_B_list.append(each)\nprint('\\nvalidated_A_to_B_list(%s):' % len(validated_A_to_B_list))\nprint('\\t'.join(validated_A_to_B_list))\n\n\nvalidated_B_to_A_list = []\nfor each in predicted_B_to_A_list:\n if each in donor_list:\n validated_B_to_A_list.append(each)\nprint('\\nvalidated_B_to_A_list(%s):' % len(validated_B_to_A_list))\nprint('\\t'.join(validated_B_to_A_list))\n\n\ndetected_in_both_direction = []\nnot_predicted_list = []\nfor each in donor_list:\n if (each in validated_A_to_B_list) and (each in validated_B_to_A_list):\n detected_in_both_direction.append(each)\n if (each not in predicted_A_to_B_list) and (each not in predicted_B_to_A_list):\n not_predicted_list.append(each)\n\nprint('\\nvalidated_in_both_direction(%s):' % len(detected_in_both_direction))\nprint('\\t'.join(detected_in_both_direction))\nprint('\\nnot_predicted_list(%s):' % len(not_predicted_list))\nprint('\\t'.join(not_predicted_list))","sub_path":"temporary/test_12.py","file_name":"test_12.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"373832817","text":"from data.Explain import Explain\nimport re\nimport collections\nimport numpy as np\nimport torch\n\n\ndef read_corpus(file_path):\n data = []\n with open(file_path, 'r', encoding='utf-8') as f:\n context = f.read()\n count = 1\n name = []\n item = []\n score = []\n comment = []\n polar = []\n for sentence in context.split('\\n'):\n if count == 1:\n name.append(sentence)\n elif count == 2:\n item.append(sentence)\n elif count == 3:\n score.append(sentence)\n elif count == 4:\n comment.append(sentence)\n elif count == 5:\n polar.append(sentence)\n count = (count + 1) % 6\n for explain in explain_in_opinion(comment):\n data.append(explain)\n return data\n\n\ndef read_slice_hotel_corpus(config):\n train_data = read_corpus(config.train_file)\n dev_data = read_corpus(config.dev_file)\n test_data = read_corpus(config.test_file)\n\n print('\\nloading data successfully')\n print('dataset:hotel')\n count = collections.Counter()\n for explain in train_data:\n count[explain.label] += 1\n print('train_data:\\nfac:{0}\\ncon:{1}\\nsug:{2}\\n'.format(count['fac'], count['con'], count['sug']))\n count = collections.Counter()\n for explain in dev_data:\n count[explain.label] += 1\n print('dev_data:\\nfac:{0}\\ncon:{1}\\nsug:{2}\\n'.format(count['fac'], count['con'], count['sug']))\n count = collections.Counter()\n for explain in test_data:\n count[explain.label] += 1\n print('test_data:\\nfac:{0}\\ncon:{1}\\nsug:{2}\\n'.format(count['fac'], 
count['con'], count['sug']))\n\n    return train_data, dev_data, test_data\n\n\ndef read_slice_phone_corpus(file_path):\n    data = []\n    with open(file_path, 'r', encoding='utf-8') as f:\n        context = f.read()\n        product = []\n        score = []\n        comment = []\n        polar = []\n        for one_user in context.split('\\n\\n'):\n            information_list = one_user.split('\\n')\n            if len(information_list) > 3:\n                product.append(information_list[0])\n                score.append(information_list[1])\n                comment.extend(information_list[2:-1])\n                polar.append(information_list[-1])\n        for explain in explain_in_opinion(comment):\n            data.append(explain)\n    train_data = data[:len(data) // 10 * 7]\n    dev_data = data[len(data) // 10 * 7:len(data) // 10 * 9]\n    test_data = data[len(data) // 10 * 9:]\n\n    print('dataset:phone')\n    count = collections.Counter()\n    for explain in train_data:\n        count[explain.label] += 1\n    print('train_data:\\nfac:{0}\\ncon:{1}\\nsug:{2}\\n'.format(count['fac'], count['con'], count['sug']))\n    count = collections.Counter()\n    for explain in dev_data:\n        count[explain.label] += 1\n    print('dev_data:\\nfac:{0}\\ncon:{1}\\nsug:{2}\\n'.format(count['fac'], count['con'], count['sug']))\n    count = collections.Counter()\n    for explain in test_data:\n        count[explain.label] += 1\n    print('test_data:\\nfac:{0}\\ncon:{1}\\nsug:{2}\\n'.format(count['fac'], count['con'], count['sug']))\n\n    return train_data, dev_data, test_data\n\n\ndef explain_in_opinion(comment):\n    for statement in comment:\n        pattern_fac = re.compile(r'<fac>(.*?)</fac>')\n        pattern_rea = re.compile(r'<rea>(.*?)</rea>')\n        pattern_con = re.compile(r'<con>(.*?)</con>')\n        pattern_sug = re.compile(r'<sug>(.*?)</sug>')\n        factor = pattern_fac.findall(statement)\n        reality = pattern_rea.findall(statement)\n        condition = pattern_con.findall(statement)\n        suggestion = pattern_sug.findall(statement)\n        if len(factor + reality) != 0:\n            for i in factor:\n                yield Explain(i, 'fac', statement)\n        if condition:\n            for i in condition:\n                yield Explain(i, 'con', statement)\n        if suggestion:\n            for i in suggestion:\n                yield Explain(i, 'sug', statement)\n\n\ndef batch_slice(data, batch_size):\n    batch_num = int(np.ceil(len(data) / float(batch_size)))\n    for i in range(batch_num):\n        cur_batch_size = batch_size if i < batch_num - 1 else len(data) - batch_size * i\n        sentences = [data[i * batch_size + b] for b in range(cur_batch_size)]\n\n        yield sentences\n\n\ndef inst(data):\n    return data\n\n\ndef data_iter(data, batch_size, shuffle=True):\n    \"\"\"\n    randomly permute data, then sort by source length, and partition into batches\n    ensure that the length of sentences in each batch\n    \"\"\"\n\n    batched_data = []\n    if shuffle: np.random.shuffle(data)\n    batched_data.extend(list(batch_slice(data, batch_size)))\n\n    if shuffle: np.random.shuffle(batched_data)\n    for batch in batched_data:\n        yield batch\n\n\ndef batch_data_variable(batch, vocab, config):\n    batch_features = []\n    batch_gold_label = []\n    batch_features_length = []\n    batch.sort(key=lambda explain: explain.length, reverse=True)\n    for explain in batch:\n        if explain.length < config.max_sentence_len:\n            if not explain.regularized_seg:\n                explain.regularized_seg = (\n                    explain.seg_list + [vocab._id2word[0]] * (config.max_sentence_len - explain.length))\n            batch_features_length.append(explain.length)\n        else:\n            if not explain.regularized_seg:\n                explain.regularized_seg = explain.seg_list[:config.max_sentence_len]\n            batch_features_length.append(config.max_sentence_len)\n        # word to id\n        if not explain.tokens:\n            explain.tokens = vocab.word2id(explain.regularized_seg)\n        batch_features.append(explain.tokens)\n        
batch_gold_label.append(vocab.label2id(explain.label))\n batch_features = torch.LongTensor(batch_features).to(config.device)\n batch_features_length = torch.IntTensor(batch_features_length).to(config.device)\n batch_gold_label = torch.LongTensor(batch_gold_label).to(config.device)\n return batch_features, batch_features_length, batch_gold_label\n\n\ndef batch_pretrain_variable_sent_level(batch, vocab, config, tokenizer):\n batch_size = len(batch)\n max_bert_len = -1\n max_sent_num = max([len(data[0].sentences) for data in batch])\n max_sent_len = max([len(sent) for data in batch for sent in data[0].sentences])\n # if config.max_sent_len < max_sent_len:max_sent_len = config.max_sent_len\n batch_bert_indices = []\n batch_segments_ids = []\n batch_piece_ids = []\n for data in batch:\n sents = data[0].sentences\n doc_bert_indices = []\n doc_semgents_ids = []\n doc_piece_ids = []\n for sent in sents:\n sent = sent[:max_sent_len]\n bert_indice, segments_id, piece_id = tokenizer.bert_ids(' '.join(sent))\n doc_bert_indices.append(bert_indice)\n doc_semgents_ids.append(segments_id)\n doc_piece_ids.append(piece_id)\n assert len(piece_id) == len(sent)\n assert len(bert_indice) == len(segments_id)\n bert_len = len(bert_indice)\n if bert_len > max_bert_len: max_bert_len = bert_len\n batch_bert_indices.append(doc_bert_indices)\n batch_segments_ids.append(doc_semgents_ids)\n batch_piece_ids.append(doc_piece_ids)\n bert_indice_input = np.zeros((batch_size, max_sent_num, max_bert_len), dtype=int)\n bert_mask = np.zeros((batch_size, max_sent_num, max_bert_len), dtype=int)\n bert_segments_ids = np.zeros((batch_size, max_sent_num, max_bert_len), dtype=int)\n bert_piece_ids = np.zeros((batch_size, max_sent_num, max_sent_len, max_bert_len), dtype=float)\n\n for idx in range(batch_size):\n doc_bert_indices = batch_bert_indices[idx]\n doc_semgents_ids = batch_segments_ids[idx]\n doc_piece_ids = batch_piece_ids[idx]\n sent_num = len(doc_bert_indices)\n assert sent_num == len(doc_semgents_ids)\n for idy in range(sent_num):\n bert_indice = doc_bert_indices[idy]\n segments_id = doc_semgents_ids[idy]\n bert_len = len(bert_indice)\n piece_id = doc_piece_ids[idy]\n sent_len = len(piece_id)\n assert sent_len <= bert_len\n for idz in range(bert_len):\n bert_indice_input[idx, idy, idz] = bert_indice[idz]\n bert_segments_ids[idx, idy, idz] = segments_id[idz]\n bert_mask[idx, idy, idz] = 1\n for idz in range(sent_len):\n for sid, piece in enumerate(piece_id):\n avg_score = 1.0 / (len(piece))\n for tid in piece:\n bert_piece_ids[idx, idy, sid, tid] = avg_score\n\n bert_indice_input = torch.from_numpy(bert_indice_input)\n bert_segments_ids = torch.from_numpy(bert_segments_ids)\n bert_piece_ids = torch.from_numpy(bert_piece_ids).type(torch.FloatTensor)\n bert_mask = torch.from_numpy(bert_mask)\n\n return bert_indice_input, bert_segments_ids, bert_piece_ids, bert_mask\n","sub_path":"data/Dataloader.py","file_name":"Dataloader.py","file_ext":"py","file_size_in_byte":9045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"653005290","text":"#!/usr/bin/python3\n\"\"\"script that starts a Flask web application\"\"\"\nfrom flask import Flask\nfrom models import storage, State\nfrom flask import render_template\n\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef teardown(exceptions):\n storage.close()\n\n\n@app.route(\"/states_list\", strict_slashes=False)\ndef states_list():\n state_obj = storage.all(\"State\")\n states = list()\n for state, value in state_obj.items():\n 
states.append(value)\n    return render_template(\"7-states_list.html\", states=states)\n\n\n@app.route(\"/cities_by_states\", strict_slashes=False)\ndef cities_by_states():\n    state_obj = storage.all(\"State\")\n    city_obj = storage.all(\"City\")\n    states = list()\n    cities = list()\n    for state, value in state_obj.items():\n        states.append(value)\n    for city, value in city_obj.items():\n        cities.append(value)\n    return render_template(\"8-cities_by_states.html\",\n                           states=states,\n                           cities=cities)\n\n\n@app.route(\"/states\", strict_slashes=False)\n@app.route(\"/states/<id>\", strict_slashes=False)\ndef show_states(id=None):\n    state_obj = storage.all(\"State\")\n    city_obj = storage.all(\"City\")\n    states = list()\n    cities = list()\n    for state, value in state_obj.items():\n        states.append(value)\n    for city, value in city_obj.items():\n        cities.append(value)\n\n    state_id = \"State.{}\".format(id)\n    if id is not None and state_id not in state_obj:\n        states = None\n    return render_template(\"9-states.html\",\n                           states=states,\n                           cities=cities,\n                           id=id)\n\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}{"seq_id":"312040195","text":"# Assignment 1 - Managing students!\r\n#\r\n# CSC148 Fall 2014, University of Toronto\r\n# Instructor: David Liu\r\n# ---------------------------------------------\r\n# STUDENT INFORMATION\r\n#\r\n# List your group members below, one per line, in format\r\n# <full name>, <utorid>\r\n# Nguyen Binh Nguyen, nguye571\r\n#\r\n#\r\n# ---------------------------------------------\r\n\"\"\"Interactive console for assignment.\r\n\r\nThis module contains the code necessary for running the interactive console.\r\nAs provided, the console does nothing interesting: it is your job to build\r\non it to fulfill all the given specifications.\r\n\r\nrun: Run the main interactive loop.\r\n\"\"\"\r\n\r\nfrom student import *\r\n\r\ndef run():\r\n    \"\"\" (NoneType) -> NoneType\r\n\r\n    Run the main interactive loop.\r\n    \"\"\"\r\n\r\n    database = Database()\r\n    history = History()\r\n    \r\n    while True:\r\n        command = input('')\r\n        split_command = command.split()\r\n\r\n        if command == 'exit':\r\n            break\r\n        \r\n        elif command == '':\r\n            print(\"Unrecognized command!\")\r\n            history.push('')\r\n        \r\n        elif split_command[0] == 'undo':\r\n            undo(split_command, database, history)\r\n        \r\n        elif len(split_command) == 3:\r\n            if split_command[0] == 'create':\r\n                create_student(split_command[2], database, history)\r\n            \r\n            elif split_command[0] == 'enrol':\r\n                enrol(split_command[1], split_command[2], database, history)\r\n            \r\n            elif split_command[0] == 'drop':\r\n                drop(split_command[1], split_command[2], database, history)\r\n            \r\n            elif split_command[0] == 'common-courses':\r\n                common_courses(split_command, database, history)\r\n            \r\n            else:\r\n                print('Unrecognized command!')\r\n                history.push('')\r\n        \r\n        elif len(split_command) == 2:\r\n            if split_command[0] == 'list-courses':\r\n                list_courses(split_command[1], database, history)\r\n            \r\n            elif split_command[0] == 'class-list':\r\n                class_list(split_command[1], database, history)\r\n            \r\n            else:\r\n                print(\"Unrecognized command!\")\r\n                history.push('')\r\n        \r\n        else:\r\n            print(\"Unrecognized command!\")\r\n            history.push('')\r\n    \r\ndef create_student(student_name, database, history):\r\n    try: \r\n        database.get_student_object(student_name)\r\n        print('ERROR: Student {} already 
exists.'.format(student_name))\r\n history.push('') \r\n except NonExistentStudentError:\r\n database.push_student(student_name, Student(student_name))\r\n history.push('create student {}'.format(student_name))\r\n\r\ndef enrol(student_name, course_code, database, history):\r\n try:\r\n student_object = database.get_student_object(student_name)\r\n course_object = database.get_course_object(course_code)\r\n \r\n try:\r\n student_object.enrol(course_code, course_object)\r\n history.push('enrol {} {}'.format(student_name, course_code))\r\n except FullCourseError:\r\n print('ERROR: Course {} is full.'.format(course_code))\r\n history.push('')\r\n except AlreadyTakingCourseError: \r\n history.push('')\r\n \r\n except NonExistentStudentError:\r\n print(\"ERROR: Student {} does not exist.\".format(student_name))\r\n history.push('')\r\n\r\ndef drop(student_name, course_code, database, history):\r\n try:\r\n student_object = database.get_student_object(student_name)\r\n course_object = database.get_course_object(course_code)\r\n \r\n try:\r\n student_object.drop(course_code, course_object)\r\n history.push('drop {} {}'.format(student_name, course_code))\r\n except NotTakingCourseError:\r\n history.push('')\r\n \r\n except NonExistentStudentError:\r\n print(\"ERROR: Student {} does not exist.\".format(student_name))\r\n history.push('')\r\n \r\ndef list_courses(student_name, database, history):\r\n try:\r\n student_object = database.get_student_object(student_name)\r\n print(student_object.list_courses())\r\n except NonExistentStudentError:\r\n print(\"ERROR: Student {} does not exist.\".format(student_name))\r\n history.push('') \r\n\r\ndef common_courses(split_command, database, history):\r\n try:\r\n student_1_object = database.get_student_object(split_command[1])\r\n student_2_object = database.get_student_object(split_command[2])\r\n print(student_1_object.common_courses(student_2_object))\r\n except NonExistentStudentError:\r\n for i in range(1, 3):\r\n try:\r\n database.get_student_object(split_command[i])\r\n except NonExistentStudentError:\r\n print(\"ERROR: Student {} does not exist.\".format(split_command[i]))\r\n history.push('')\r\n \r\ndef class_list(course_code, database, history):\r\n course_object = database.get_course_object(course_code)\r\n print(course_object.class_list())\r\n history.push('')\r\n \r\ndef undo(split_command, database, history):\r\n try:\r\n if len(split_command) == 1:\r\n history.undo(1, database)\r\n elif len(split_command) == 2:\r\n history.undo(split_command[1], database)\r\n else:\r\n print(\"Unrecognized command!\")\r\n history.push('')\r\n\r\n except ValueError:\r\n print('ERROR: {} is not a positive natural number.'.format(split_command[1]))\r\n except EndOfHistoryError:\r\n print('ERROR: No commands to undo.') \r\n\r\nif __name__ == '__main__':\r\n run()\r\n","sub_path":"sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"334047462","text":"'''\nSample Input:\n1\n5 2\n50 40 30 20 10\n\nSample Output:\n43.3333333333\n'''\nfor _ in range(int(input())):\n n,x=[int(i) for i in input().split()]\n s= [int(i) for i in input().split()]\n cg=s[0]\n credits=1\n for i in range(1,n):\n if s[i]>cg:\n cg=s[i]\n credits=i+1\n\n s[credits-1]=0\n while x>1:\n best=0\n for i in range(n):\n if s[i]!=0:\n temp=(cg*credits+(i+1)*s[i])/(credits+1+i)\n if temp>best:\n best=temp\n val=i\n credits+=val+1\n cg=best\n s[val]=0\n x-=1\n\n 
print(cg)\n\n\n","sub_path":"codechef/crisis.py","file_name":"crisis.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"396122821","text":"from django.conf.urls import url\nfrom OCR.views import addDocument,query,get_doc_status,fileupload\nfrom TextToSpeech.views import pollyexotel\n\nurlpatterns = [\n url(r'adddoc', addDocument),\n url(r'query', query),\n url(r'docstatus', get_doc_status),\n url(r'upload', fileupload),\n url(r'polly', pollyexotel), \n\n]\n","sub_path":"OCR/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"88475025","text":"\"\"\"\nСоздать вручную и заполнить несколькими строками текстовый файл,\nв котором каждая строка должна содержать данные о фирме:\nназвание, форма собственности, выручка, издержки.\n\nПример строки файла: firm_1 ООО 10000 5000.\n\nНеобходимо построчно прочитать файл, вычислить прибыль каждой компании, а также среднюю прибыль.\nЕсли фирма получила убытки, в расчет средней прибыли ее не включать.\n\nДалее реализовать список. Он должен содержать словарь с фирмами и их прибылями,\nа также словарь со средней прибылью.\nЕсли фирма получила убытки, также добавить ее в словарь (со значением убытков).\n\nПример списка: [{“firm_1”: 5000, “firm_2”: 3000, “firm_3”: 1000}, {“average_profit”: 2000}].\n\nИтоговый список сохранить в виде json-объекта в соответствующий файл.\nПример json-объекта:\n[{\"firm_1\": 5000, \"firm_2\": 3000, \"firm_3\": 1000}, {\"average_profit\": 2000}]\nПодсказка: использовать менеджер контекста.\n\"\"\"\nimport json\n\nresult_list = []\ndict_firms = {}\ninterim_list = []\ntry:\n with open(\"firms.txt\") as f_obj:\n average_profit = 0\n count = 0\n for f_str in f_obj:\n temp_firm = f_str.split()\n profit = int(temp_firm[2]) - int(temp_firm[3])\n if profit > 0:\n average_profit += profit\n count += 1\n firms_profit_tuple = tuple([temp_firm[0], profit])\n interim_list.append(firms_profit_tuple)\n dict_firms = dict(interim_list)\n result_list.append(dict_firms)\n result_list.append(dict(average_profit=average_profit / count))\n print(result_list)\n with open('firms.json', 'w') as file_json:\n json.dump(result_list, file_json)\nexcept IOError:\n print(\"Произошла ошибка ввода-вывода!\")\n","sub_path":"lesson5/homework5_6.py","file_name":"homework5_6.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"81149461","text":"#!/usr/bin/python\n\n##===--- run_iwyu_tests.py - include-what-you-use test framework driver ----===##\n#\n# The LLVM Compiler Infrastructure\n#\n# This file is distributed under the University of Illinois Open Source\n# License. 
See LICENSE.TXT for details.\n#\n##===-----------------------------------------------------------------------===##\n\n\"\"\"A test harness for IWYU testing.\"\"\"\n\n__author__ = 'dsturtevant@google.com (Dean Sturtevant)'\n\nimport glob\nimport os\nimport re\nimport sys\nimport unittest\nimport logging\nlogging.basicConfig(level=logging.INFO)\nclass Flags(object):\n def __init__(self): self.test_files = []\nFLAGS = Flags()\nimport iwyu_test_util\n\n\nTEST_ROOTDIR = 'tests'\n\n\ndef CheckAlsoExtension(extension):\n \"\"\"Return a suitable iwyu flag for checking files with the given extension.\"\"\"\n return '--check_also=\"%s\"' % os.path.join(TEST_ROOTDIR, '*' + extension)\n\n\ndef MappingFile(filename):\n \"\"\"Return a suitable iwyu flag for adding the given mapping file.\"\"\"\n return '--mapping_file=%s' % os.path.join(TEST_ROOTDIR, filename)\n\n\nclass OneIwyuTest(unittest.TestCase):\n \"\"\"Superclass for tests. A subclass per test-file is created at runtime.\"\"\"\n\n def setUp(self):\n # Iwyu flags for specific tests.\n # Map from filename to flag list. If any test requires special\n # iwyu flags to run properly, add an entry to the map with\n # key=cc-filename (relative to TEST_ROOTDIR), value=list of flags.\n flags_map = {\n 'backwards_includes.cc': [CheckAlsoExtension('-d*.h')],\n 'badinc.cc': [MappingFile('badinc.imp')],\n 'check_also.cc': [CheckAlsoExtension('-d1.h')],\n 'implicit_ctor.cc': [CheckAlsoExtension('-d1.h')],\n 'iwyu_stricter_than_cpp.cc': [CheckAlsoExtension('-autocast.h'),\n CheckAlsoExtension('-fnreturn.h'),\n CheckAlsoExtension('-typedefs.h'),\n CheckAlsoExtension('-d2.h')],\n 'keep_mapping.cc': [CheckAlsoExtension('-public.h'), \n MappingFile('keep_mapping.imp')],\n 'macro_location.cc': [CheckAlsoExtension('-d2.h')],\n 'non_transitive_include.cc': [CheckAlsoExtension('-d*.h'),\n '--transitive_includes_only'],\n 'no_h_includes_cc.cc': [CheckAlsoExtension('.c')],\n 'overloaded_class.cc': [CheckAlsoExtension('-i1.h')],\n 'prefix_header_includes_add.cc': ['--prefix_header_includes=add'],\n 'prefix_header_includes_keep.cc': ['--prefix_header_includes=keep'],\n 'prefix_header_includes_remove.cc': ['--prefix_header_includes=remove'],\n }\n prefix_headers = ['-include', 'tests/prefix_header_includes-d1.h',\n '-include', 'tests/prefix_header_includes-d2.h',\n '-include', 'tests/prefix_header_includes-d3.h',\n '-include', 'tests/prefix_header_includes-d4.h']\n clang_flags_map = {\n 'auto_type_within_template.cc': ['-std=c++11'],\n 'conversion_ctor.cc': ['-std=c++11'],\n 'ms_inline_asm.cc': ['-fms-extensions'],\n 'prefix_header_includes_add.cc': prefix_headers,\n 'prefix_header_includes_keep.cc': prefix_headers,\n 'prefix_header_includes_remove.cc': prefix_headers,\n }\n # Internally, we like it when the paths start with TEST_ROOTDIR.\n self._iwyu_flags_map = dict((os.path.join(TEST_ROOTDIR, k), v)\n for (k,v) in flags_map.items())\n self._clang_flags_map = dict((os.path.join(TEST_ROOTDIR, k), v)\n for (k,v) in clang_flags_map.items())\n\n def RunOneTest(self, filename):\n logging.info('Testing iwyu on %s', filename)\n # Split full/path/to/foo.cc into full/path/to/foo and .cc.\n (all_but_extension, _) = os.path.splitext(filename)\n (dirname, basename) = os.path.split(all_but_extension)\n # Generate diagnostics on all foo-* files (well, not other\n # foo-*.cc files, which is not kosher but is legal), in addition\n # to foo.h (if present) and foo.cc.\n all_files = (glob.glob('%s-*' % all_but_extension) +\n glob.glob('%s/*/%s-*' % (dirname, basename)) +\n glob.glob('%s.h' % 
all_but_extension) +\n glob.glob('%s/*/%s.h' % (dirname, basename)))\n files_to_check = [f for f in all_files if not f.endswith('.cc')]\n files_to_check.append(filename)\n\n # IWYU emits summaries with canonicalized filepaths, where all the\n # directory separators are set to '/'. In order for the testsuite to\n # correctly match up file summaries, we must canonicalize the filepaths\n # in the same way here.\n files_to_check = [f.replace(os.sep, '/') for f in files_to_check]\n\n iwyu_flags = self._iwyu_flags_map.get(filename, None)\n if iwyu_flags:\n logging.info('%s: Using iwyu flags %s', filename, str(iwyu_flags))\n\n clang_flags = self._clang_flags_map.get(filename, None)\n if clang_flags:\n logging.info('%s: Using clang flags %s', filename, str(clang_flags))\n\n iwyu_test_util.TestIwyuOnRelativeFile(self, filename, files_to_check,\n iwyu_flags, clang_flags, verbose=True)\n\n\ndef RegisterFilesForTesting():\n \"\"\"Create a test-class for every .cc file in TEST_ROOTDIR.\"\"\"\n module = sys.modules[__name__]\n filenames = []\n for (dirpath, dirs, files) in os.walk(TEST_ROOTDIR):\n filenames.extend(os.path.join(dirpath, f) for f in files\n if f.endswith('.cc'))\n if not filenames:\n sys.exit('No tests found in %s!' % os.path.abspath(TEST_ROOTDIR))\n for filename in filenames:\n basename = os.path.basename(filename[:-len('.cc')])\n class_name = re.sub('[^0-9a-zA-Z_]', '_', basename) # python-clean\n if class_name[0].isdigit(): # classes can't start with a number\n class_name = '_' + class_name\n while class_name in module.__dict__: # already have a class with that name\n class_name += '2' # just append a suffix :-)\n\n logging.info('Registering %s to test %s', class_name, filename)\n test_class = type(class_name, # class name\n (OneIwyuTest,), # superclass\n # and methods. f=filename is required for proper scoping\n {'runTest': lambda self, f=filename: self.RunOneTest(f)})\n setattr(module, test_class.__name__, test_class)\n\n\nif __name__ == '__main__':\n\n RegisterFilesForTesting()\n unittest.main()\n","sub_path":"tools/clang/tools/iwyu/run_iwyu_tests.py","file_name":"run_iwyu_tests.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"229060000","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 16 14:13:21 2019\n\n@author: xie\n\"\"\"\n\ndef climb(n):\n if n == 1:\n return 1\n elif n == 2:\n return 2\n else:\n return climb(n-1)+climb(n-2)\n\nprint(climb(int(input())))\n","sub_path":"OJ自行练习/台阶走法.py","file_name":"台阶走法.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"390397979","text":"from copy import deepcopy\nfrom typing import Union, Callable\n\nimport pandas as pd\n\n\nclass CachedLoader:\n \"\"\"\n \"\"\"\n CACHE = {}\n\n def __init__(\n self,\n uid: str,\n method_name_or_callable: Union[str, Callable],\n *args,\n **kwargs\n ):\n \"\"\"\n Args:\n uid: the unique cache-key to associate with this data\n\n method_name_or_callable: use this to load the data\n\n *args: args to pass to the loading method/callable\n\n **kwargs: kwargs to pass to the loading callable\n\n\n This is a descriptor that provides data loading/caching capability. 
The\n main use case if for working in Jupyter notebooks where the same unchanged\n files or database queries must be performed over and over as you iterate\n your analysis.\n\n Here are examples of common use cases:\n\n\n class SimpleCSV:\n # Define the filename you want to load\n FILE_NAME = 'my_file.csv'\n\n # Use FILE_NAME as the uid in which to store data\n # read from FILE_NAME using pandas read_csv\n df = CachedLoader(FILE_NAME, pd.read_csv, FILE_NAME)\n\n class HugeCSV:\n # Define the filename you want to load\n FILE_NAME = 'my_file.csv'\n\n # Use FILE_NAME as the uid in which to store data\n # read from FILE_NAME using pandas read_csv\n df = CachedLoader(FILE_NAME, pd.read_csv, FILE_NAME)\n\n # By default, the cache will return a deepcopy of contents\n # For huge data, you may not want that. This allows you\n # to simply return a reference to the cache\n df.set_copy(False)\n\n\n class ProcessedCSV:\n # Define the filename you want to load\n FILE_NAME = 'my_file.csv'\n\n # Use FILE_NAME as the uid in which to store data\n # read from FILE_NAME using pandas read_csv\n df = CachedLoader(FILE_NAME, 'load_csv_file', FILE_NAME)\n\n def load_csv_file(self, file_name):\n # Load and transform the data\n df = pd.read_csv(file_name)\n df = df.head(2)\n return df\n\n class CachedQuery:\n # Define a cached\n json_blob = CachedLoader(\n 'json_blob',\n 'my_db_query',\n min_time=parse('1/1/2017'),\n max_time=parse('1/31/2017')\n )\n\n def my_db_query(self, min_time, max_time):\n return run_my_query(min_time, max_time)\n\n\n class SimpleJson:\n # Define the filename you want to load\n FILE_NAME = 'my_file.json'\n\n # Use FILE_NAME as the uid in which to store data\n # read from FILE_NAME using pandas read_csv\n df = CachedLoader(FILE_NAME, 'load_json', FILE_NAME)\n\n def load_json(self, file_name):\n import json\n with open(file_name) as f:\n blob = json.loads(f.read())\n return blob\n \"\"\"\n\n self.uid = uid\n self.method_name_or_callable = method_name_or_callable\n self.args = args\n self.kwargs = kwargs\n self._copy = True\n\n def _get_loader(self, obj):\n if isinstance(self.method_name_or_callable, str):\n loader = getattr(obj, self.method_name_or_callable)\n elif hasattr(self.method_name_or_callable, '__call__'):\n loader = self.method_name_or_callable\n else:\n raise ValueError('Loader must be either a string or a callable')\n return loader\n\n def __get__(self, obj, objtype):\n if self.uid not in self.CACHE:\n data = self._get_loader(obj)(*self.args, **self.kwargs)\n self.CACHE[self.uid] = data\n else:\n data = self.CACHE[self.uid]\n\n # You probably want to return copies to prevent mutating the cache\n if self._copy:\n if isinstance(data, pd.DataFrame):\n data = data.copy()\n else:\n data = deepcopy(data)\n return data\n\n def set_copy(self, true_or_false):\n self._copy = true_or_false\n\n def __set__(self, obj, val):\n raise RuntimeError('Atrribute cannot be set')\n\n def __delete__(self, obj):\n if self.uid in self.__class__.CACHE:\n del self.__class__.CACHE[self.uid]\n\n @classmethod\n def clear(cls):\n cls.CACHE = {}\n","sub_path":"easier/cached_loader.py","file_name":"cached_loader.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"333870833","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n\"\"\"\nContains the core classes of find_symptoms\n\"\"\"\n\nfrom pymongo import Connection\nfrom operator import itemgetter as _itemgetter\nimport heapq as _heapq\nimport re\nimport 
logging\n\nfrom db_settings import mongo_host, mongo_db, mongo_user, mongo_passwd\n\nlogger = logging.getLogger(__name__)\n\ndef get_mongo_kcsdw():\n conn = Connection(mongo_host)\n db = conn[mongo_db]\n isAuthenticated = db.authenticate(mongo_user, mongo_passwd)\n if isAuthenticated:\n logging.info(\"Successed to login the mongodb\")\n return db\n else:\n logging.error(\"Failed to login the mongodb\")\n return None\n\nclass Counter(dict):\n def __init__(self, iterable):\n for elem in iterable:\n self[elem] = self.get(elem, 0) + 1\n def most_common(self, n=None):\n if n is None:\n return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)\n return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))\n\nclass PatternHandler(object):\n def __init__(self, kcsdw):\n self.kcsdw = kcsdw \n\n def __most_common(self, solutions, top):\n counter = Counter(solutions)\n most_common = counter.most_common(top)\n retval = []\n for s in most_common:\n retval.append(s[0])\n return retval\n\n def __match_cases(self, pattern, case_collection):\n cases_count = case_collection.count()\n regex = re.compile(pattern)\n logging.info('Querying \"' + pattern + '\"')\n cursor = case_collection.find({\"description\": regex\n }).sort([('createddate', -1)])\n matched_cases_count = cursor.count() # number of matched cases\n logging.info(\"Found \" + str(matched_cases_count) + \" results\")\n matched_cases_ratio = \"{0:.4f}%\".format(\n float(matched_cases_count) / cases_count * 100)\n ten_latest_matched_cases = []\n all_sbrs = []\n for x in cursor[0:30]:\n subject = x[\"subject\"]\n if len(subject) > 65:\n subject = subject[:65].rsplit(' ', 1)[0] + \"...\"\n description = x[\"description\"].replace('\\n', ' ')\n if len(description) > 230:\n description = description[:230].rsplit(' ', 1)[0] + \"...\"\n sbr_groups = x[\"sbr_groups\"].split(\"#\")\n sbr_groups = [sbr for sbr in sbr_groups if len(sbr) != 0]\n all_sbrs.extend(sbr_groups)\n ten_latest_matched_cases.append((x[\"casenumber\"], x[\"caseid\"], subject, description, \", \".join(sbr_groups)))\n retval = {}\n retval[\"number_of_matched_cases\"] = matched_cases_count\n retval[\"ratio_of_matched_cases\"] = matched_cases_ratio\n retval[\"example_cases\"] = ten_latest_matched_cases\n retval[\"most_common_sbrs\"] = self.__most_common(all_sbrs, 1)\n return retval\n\n def __most_common_solutions(self, pattern, case_collection):\n regex = re.compile(pattern, re.IGNORECASE)\n cursor = case_collection.find({\"description\": regex, \"$where\":\n 'this.solution_ids.length > 0'})\n solutions = []\n for case in cursor[:]:\n solutions.extend(case[\"solution_ids\"])\n most_common_solutions = self.__most_common(solutions, 3)\n retval = {}\n retval[\"most_common_solutions\"] = most_common_solutions\n return retval\n\n def handle(self, pattern):\n case_collection = self.kcsdw.cases\n retval = {\"pattern\": pattern} \n match_cases_results = self.__match_cases(pattern, case_collection)\n retval.update(match_cases_results)\n most_common_solutions_results = self.__most_common_solutions(pattern, case_collection)\n logging.info(\"Top three most common solutions: \" + str(most_common_solutions_results))\n retval.update(most_common_solutions_results)\n return retval\n","sub_path":"symptom/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"314613111","text":"# -*- coding: UTF-8 -*-\n\nimport random\nimport numpy as np\n\nfrom dtw import dtw\nfrom numpy.linalg 
import norm\n\nfrom pyAudioAnalysis import audioBasicIO\nfrom pyAudioAnalysis import audioFeatureExtraction\n\n# from sklearn.svm import SVC\n# from sklearn.pipeline import Pipeline\n# from sklearn.preprocessing import StandardScaler\nfrom sklearn.mixture import GaussianMixture\n\n\ndef random_dtw_number(length=6):\n base_str = '0123456789'\n return ''.join(random.choice(base_str) for i in range(length))\n\n\n# def max_list(lt):\n# temp = 0\n# for i in lt:\n# if lt.count(i) > temp:\n# max_str = i\n# temp = lt.count(i)\n# return str(max_str)\n#\n#\n# def extract_dtw_mfcc(audio_file_path):\n# [Fs, k] = audioBasicIO.readAudioFile(audio_file_path)\n# mfcc = audioFeatureExtraction.stFeatureExtraction(k, Fs, 256, 80)[8:21]\n# return mfcc\n#\n#\n# def auth_by_dtw(dtw_mfcc_csv, audio_file_path):\n# mfcc_1 = np.loadtxt(dtw_mfcc_csv, delimiter=',')\n#\n# [Fs, k] = audioBasicIO.readAudioFile(audio_file_path)\n# mfcc_2 = audioFeatureExtraction.stFeatureExtraction(k, Fs, 256, 80)[8:21]\n#\n# dist, cost, acc_cost, path = dtw(mfcc_1.T, mfcc_2.T, dist=lambda x, y: norm(x - y, ord=1))\n#\n# print(dist)\n#\n# if dist < 2.5:\n# return True\n# return False\n#\n#\n# def auth_pipeline(dtw_features_path, audio_file_path, data_matrix_path, label_matrix_path):\n# dtw_features = np.loadtxt(dtw_features_path, delimiter=',')\n#\n# [Fs, k] = audioBasicIO.readAudioFile(audio_file_path)\n# audio_features = audioFeatureExtraction.stFeatureExtraction(k, Fs, 256, 80)[8:21]\n# # audio_features = audioFeatureExtraction.stFeatureExtraction_modified_2nd_edition(k, Fs, 0.050 * Fs, 0.025 * Fs)\n#\n# dist, cost, acc_cost, path = dtw(dtw_features.T, audio_features.T, dist=lambda x, y: norm(x - y, ord=1))\n# print(dist)\n# if dist < 2.9:\n# dtw_result = True\n# else:\n# dtw_result = False\n#\n# data_matrix = np.loadtxt(data_matrix_path, delimiter=',')\n# label_matrix = np.loadtxt(label_matrix_path, delimiter=',')\n#\n# print(data_matrix.shape, label_matrix.shape)\n#\n# clf_svm = Pipeline([\n# (\"scaler\", StandardScaler()),\n# # (\"svm_clf\", SVC(kernel=\"poly\", degree=10, C=5, coef0=100))\n# (\"svm_clf\", SVC(kernel='poly', gamma=5, C=1000))\n# ])\n#\n# clf_svm.fit(data_matrix, label_matrix)\n# # svm_result = max_list(clf_svm.predict(audio_features).tolist())\n# audio_features = audioFeatureExtraction.stFeatureExtraction(k, Fs, 0.050 * Fs, 0.025 * Fs)[8:21]\n# result = clf_svm.predict(audio_features.T)\n#\n# print(result.shape)\n#\n# result_in = result.tolist()\n# svm_result = max_list(result_in)\n#\n# if svm_result == '2.0':\n# real_man_result = True\n# else:\n# real_man_result = False\n#\n# print(dtw_result, real_man_result)\n#\n# return dtw_result, real_man_result\n\ndef auth_pipeline(dtw_features_path, audio_file_path, common_data_path, throat_data_path):\n dtw_features = np.loadtxt(dtw_features_path, delimiter=',')\n\n [Fs, k] = audioBasicIO.readAudioFile(audio_file_path)\n audio_features = audioFeatureExtraction.stFeatureExtraction(k, Fs, 256, 80)[8:21]\n\n dist, cost, acc_cost, path = dtw(dtw_features.T, audio_features.T, dist=lambda x, y: norm(x - y, ord=1))\n print(dist)\n if dist < 3.0:\n dtw_result = True\n else:\n dtw_result = False\n\n common_data = np.loadtxt(common_data_path, delimiter=',')\n throat_data = np.loadtxt(throat_data_path, delimiter=',')\n\n print(common_data.shape, throat_data.shape)\n\n common_mix = GaussianMixture(n_components=4, covariance_type='full')\n throat_mix = GaussianMixture(n_components=4, covariance_type='full')\n\n common_mix.fit(common_data)\n throat_mix.fit(throat_data)\n\n common_score 
= common_mix.score(audio_features.T)\n throat_score = throat_mix.score(audio_features.T)\n if common_score > throat_score:\n real_man_result = False\n else:\n real_man_result = True\n\n print(common_score, throat_score, dtw_result, real_man_result)\n\n return dtw_result, real_man_result\n","sub_path":"Project/speak_auth/app/api/dtw_auth.py","file_name":"dtw_auth.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"201299428","text":"from zope.component import queryAdapter\nfrom zope.component import getMultiAdapter\nfrom Products.Five import BrowserView\nfrom zExceptions import NotFound\n\nfrom Products.CMFPlone.interfaces.syndication import ISearchFeed\nfrom Products.CMFPlone.interfaces.syndication import IFeed\nfrom Products.CMFPlone.interfaces.syndication import IFeedSettings\nfrom Products.CMFPlone import PloneMessageFactory as _\n\nfrom z3c.form import form, button, field\nfrom plone.app.z3cform.layout import wrap_form\n\n\nclass FeedView(BrowserView):\n\n def feed(self):\n f = queryAdapter(self.context, IFeed)\n if f is None:\n raise NotFound\n return f\n\n def __call__(self):\n util = getMultiAdapter((self.context, self.request),\n name='syndication-util')\n if util.context_enabled(raise404=True):\n settings = IFeedSettings(self.context)\n if self.__name__ not in settings.feed_types:\n raise NotFound\n self.request.response.setHeader('Content-Type',\n 'application/atom+xml')\n return self.index()\n\n\nclass SearchFeedView(FeedView):\n def feed(self):\n f = queryAdapter(self.context, ISearchFeed)\n if f is None:\n raise NotFound\n return f\n\n def __call__(self):\n util = getMultiAdapter((self.context, self.request),\n name='syndication-util')\n if util.search_rss_enabled(raise404=True):\n self.request.response.setHeader('Content-Type',\n 'application/atom+xml')\n return self.index()\n\n\nclass SettingsForm(form.EditForm):\n label = _(u'heading_syndication_properties',\n default=u'Syndication Properties')\n description = _(u'description_syndication_properties',\n default=u'Syndication enables you to syndicate this folder so it can'\n u'be synchronized from other web sites.')\n fields = field.Fields(IFeedSettings)\n\n @button.buttonAndHandler(_(u'Save'), name='save')\n def handleSave(self, action):\n data, errors = self.extractData()\n if errors:\n self.status = self.formErrorsMessage\n return\n self.applyChanges(data)\nSettingsFormView = wrap_form(SettingsForm)\n","sub_path":"buildout-cache/eggs/Products.CMFPlone-5.0b2-py2.7.egg/Products/CMFPlone/browser/syndication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"266046754","text":"import os\nimport tensorflow as tf\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.75)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\nimport ember\nfrom keras.models import load_model\nimport pandas as pd\nimport numpy as np\nimport json\nfrom ember.features import PEFeatureExtractor\nfrom sklearn import preprocessing\nimport multiprocessing\nimport os\nimport pickle\n\ndef predict(model, scaler, raw_feature_path, num_samples):\n \"\"\" Get prediction of whether samples are malicious or benign\n\n Parameters:\n model: EmberNet model file\n scaler: Pickle file with scaler used for 
training EmberNet\n raw_feature_path: Filepath of samples\n num_samples: Number of samples in file\n\n Returns:\n scores: List of predictions for each sample (0 for benign, 1 for malicious)\n \"\"\"\n\n X_path = \"X_test_2.dat\"\n y_path = \"y_test_2.dat\"\n\n ember.vectorize_subset(X_path, y_path, [raw_feature_path], num_samples)\n\n #load X and y from .dat files\n ndim = PEFeatureExtractor.dim\n X = np.memmap(X_path, dtype=np.float32, mode=\"r\", shape=(num_samples, ndim))\n y = np.memmap(y_path, dtype=np.float32, mode=\"r\", shape=num_samples)\n \n scores = ember.predict_samplevector(model, scaler, X)\n scores = scores.flatten()\n return np.around(scores)\n\n\ndef score(model, scaler, raw_feature_path, actual_labels):\n \"\"\" Calculate TPR of model with provided samples\n\n Parameters:\n model: EmberNet model file\n scaler: Pickle file with scaler used for training EmberNet\n raw_feature_path: Filepath of samples\n actual_labels: Actual label of each sample (0 for benign, 1 for malicious)\n\n Returns:\n TPR (float): True Positive Rate of EmberNet based on provided samples\n \"\"\"\n\n num_samples = len(actual_labels)\n predicted_labels = predict(model, scaler, raw_feature_path, len(actual_labels))\n \n actual_labels = np.array(actual_labels)\n predicted_labels = np.array(predicted_labels)\n\n mal_pos = np.where(actual_labels == 1)\n mal_labels = actual_labels[mal_pos]\n pred_labels_for_mal = predicted_labels[mal_pos]\n diff = np.subtract(mal_labels, pred_labels_for_mal)\n false_negatives = np.count_nonzero(diff)\n total_positives = len(mal_pos[0])\n \n TPR = (total_positives - false_negatives) / total_positives\n\n return TPR\n\ndef retrain(model, scaler, raw_feature_path, num_samples, epochs, batch_size):\n \"\"\" Retrain model with new samples\n\n Parameters:\n model: EmberNet model file\n scaler: Pickle file with scaler used for training EmberNet\n raw_feature_path: Filepath of samples\n num_samples: Number of samples in file\n epochs: Number of training epochs \n batch_size: Batch size for training\n\n Returns:\n retrained_model: Model that has been retrained with new samples\n \"\"\"\n\n X_path = \"X_test_retrain.dat\"\n y_path = \"y_test_retrain.dat\"\n\n ember.vectorize_subset(X_path, y_path, [raw_feature_path], num_samples)\n\n #load X and y from .dat files\n ndim = PEFeatureExtractor.dim\n X = np.memmap(X_path, dtype=np.float32, mode=\"r\", shape=(num_samples, ndim))\n y = np.memmap(y_path, dtype=np.float32, mode=\"r\", shape=num_samples)\n \n retrained_model = ember.retrain_model(model, scaler, X, y, epochs, batch_size)\n retrained_model.save(\"./retrained_model.h5\")\n\n return retrained_model\n\n","sub_path":"test_ember_functions.py","file_name":"test_ember_functions.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"516827442","text":"import json\nimport os\nimport sys\nfrom my_messenger.common.errors import IncorrectDataReceivedError\n\n\ndef get_message(opened_socket, CONFIGS):\n \"\"\"\n Receives messages from remote hosts.\n Accepts a JSON message, decodes the received message\n and checks that a dictionary was received.\n :param opened_socket: socket used for the transfer.\n :param CONFIGS: configuration.\n :return: dictionary - the message.\n \"\"\"\n encoded_response = opened_socket.recv(CONFIGS.get('MAX_PACKAGE_LENGTH'))\n if isinstance(encoded_response, bytes):\n json_response = encoded_response.decode(CONFIGS.get('ENCODING'))\n response_dict = 
json.loads(json_response)\n if isinstance(response_dict, dict):\n return response_dict\n raise IncorrectDataReceivedError\n raise IncorrectDataReceivedError\n\n\ndef send_message(opened_socket, message, CONFIGS):\n \"\"\"\n Sends dictionaries through a socket.\n Encodes the dictionary as JSON and sends it over the socket.\n :param opened_socket: socket used for the transfer\n :param message: dictionary to send\n :param CONFIGS: configuration.\n :return: returns nothing\n \"\"\"\n json_message = json.dumps(message)\n encoded_message = json_message.encode(CONFIGS.get('ENCODING'))\n opened_socket.send(encoded_message)\n\n\ndef get_configs():\n \"\"\"\n reads the settings dictionary from the json config file\n \"\"\"\n if not os.path.exists('common/configs.json'):\n print('Configuration file not found')\n sys.exit(1)\n with open('common/configs.json') as configs_file:\n CONFIGS = json.load(configs_file)\n return CONFIGS\n","sub_path":"my_messenger/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"603930825","text":"import tensorflow as tf\n\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Input, Conv2D, BatchNormalization, ReLU, Conv2DTranspose, MaxPool2D, Activation, Add, Multiply\nfrom tensorflow.keras.activations import relu\n\nclass Res_Attention_UNet(Model):\n def __init__(self):\n super(Res_Attention_UNet, self).__init__()\n\n initializer = tf.keras.initializers.GlorotNormal(seed=0)\n self.max_pool = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')\n self.conv_1 = self.Conv2dBatchLayer(32,3)\n self.conv_2 = self.Conv2dBatchLayer(64,3)\n self.conv_3 = self.Conv2dBatchLayer(128,3)\n self.conv_4 = self.Conv2dBatchLayer(256,3)\n self.conv_5 = self.Conv2dBatchLayer(512,3)\n self.conv_6 = self.Conv2dBatchLayer(1024,3)\n self.up_1 = Conv2DTranspose(512, 3, activation='relu', strides=2, padding='same', kernel_initializer=initializer)\n self.x_conv1 = Conv2D(512, 1, strides=1, kernel_initializer=initializer)\n self.g_conv1 = Conv2D(512, 1, strides=1, kernel_initializer=initializer)\n self.conv_r1 = Conv2D(1, 1, strides=1, kernel_initializer=initializer)\n self.conv_7 = self.Conv2dBatchLayer(512,3)\n self.up_2 = Conv2DTranspose(256, 3, activation='relu', strides=2, padding='same', kernel_initializer=initializer)\n self.x_conv2 = Conv2D(256, 1, strides=1, kernel_initializer=initializer)\n self.g_conv2 = Conv2D(256, 1, strides=1, kernel_initializer=initializer)\n self.conv_r2 = Conv2D(1, 1, strides=1, kernel_initializer=initializer)\n self.conv_8 = self.Conv2dBatchLayer(256,3)\n self.up_3 = Conv2DTranspose(128, 3, activation='relu', strides=2, padding='same', kernel_initializer=initializer)\n self.x_conv3 = Conv2D(128, 1, strides=1, kernel_initializer=initializer)\n self.g_conv3 = Conv2D(128, 1, strides=1, kernel_initializer=initializer)\n self.conv_r3 = Conv2D(1, 1, strides=1, kernel_initializer=initializer)\n self.conv_9 = self.Conv2dBatchLayer(128,3)\n self.up_4 = Conv2DTranspose(64, 3, activation='relu', strides=2, padding='same', kernel_initializer=initializer)\n self.x_conv4 = Conv2D(64, 1, strides=1, kernel_initializer=initializer)\n self.g_conv4 = Conv2D(64, 1, strides=1, kernel_initializer=initializer)\n self.conv_r4 = Conv2D(1, 1, strides=1, kernel_initializer=initializer)\n self.conv_10 = self.Conv2dBatchLayer(64,3)\n self.up_5 = Conv2DTranspose(32, 3, activation='relu', strides=2, padding='same', kernel_initializer=initializer)\n 
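# Each up-sampling stage defines a 1x1 conv on the skip features (x_conv*) and
# a 1x1 conv on the upsampled gating signal (g_conv*); in call() the two are
# summed, ReLU-activated, squeezed to a single channel by conv_r*, and passed
# through a sigmoid to form the additive-attention mask that rescales the skip
# connection before it is concatenated with the upsampled features.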
self.x_conv5 = Conv2D(32, 1, strides=1, kernel_initializer=initializer)\n self.g_conv5 = Conv2D(32, 1, strides=1, kernel_initializer=initializer)\n self.conv_r5 = Conv2D(1, 1, strides=1, kernel_initializer=initializer)\n self.conv_11 = self.Conv2dBatchLayer(32, 3)\n self.last_conv = Conv2D(1, 3, strides=1, padding='same', kernel_initializer=initializer)\n\n\n def Conv2dBatchLayer(self, filters, kernel_size):\n initializer = tf.keras.initializers.GlorotNormal(seed=0)\n result = tf.keras.Sequential()\n result.add(Conv2D(filters, kernel_size, strides=1, padding='same', kernel_initializer=initializer))\n result.add(BatchNormalization())\n result.add(ReLU())\n return result\n\n def call(self, input):\n \n #96x96x3 -> 48x48x32\n conv1 = self.conv_1(input)\n down1 = self.max_pool(conv1)\n\n #48x48x32 -> 24x24x64\n conv2 = self.conv_2(down1)\n down2 = self.max_pool(conv2)\n\n #24x24x64 -> 12x12x128\n conv3 = self.conv_3(down2)\n down3 = self.max_pool(conv3)\n\n #12x12x128 -> 6x6x256\n conv4 = self.conv_4(down3)\n down4 = self.max_pool(conv4)\n\n #6x6x256 -> 3x3x512\n conv5 = self.conv_5(down4)\n down5 = self.max_pool(conv5)\n\n #3x3x512 -> 3x3x1024\n conv6 = self.conv_6(down5)\n\n #3x3x1024 -> 6x6x512\n up1 = self.up_1(conv6)\n x_conv1 = self.x_conv1(conv5)\n g_conv1 = self.g_conv1(up1)\n Add_1 = tf.keras.layers.add([x_conv1, g_conv1])\n actv_r1 = tf.keras.activations.relu(Add_1)\n conv_r1 = self.conv_r1(actv_r1)\n actv_s1 = tf.keras.activations.sigmoid(conv_r1)\n mult_1 = tf.keras.layers.multiply([conv5, actv_s1])\n concat1 = tf.keras.layers.concatenate([mult_1, up1], axis=3)\n conv7 = self.conv_7(concat1)\n\n #6x6x512 -> 12x12x246\n up2 = self.up_2(conv7)\n x_conv2 = self.x_conv2(conv4)\n g_conv2 = self.g_conv2(up2)\n Add_2 = tf.keras.layers.add([x_conv2, g_conv2])\n actv_r2 = tf.keras.activations.relu(Add_2)\n conv_r2 = self.conv_r2(actv_r2)\n actv_s2 = tf.keras.activations.sigmoid(conv_r2)\n mult_2 = tf.keras.layers.multiply([conv4, actv_s2])\n concat2 = tf.keras.layers.concatenate([mult_2, up2], axis=3)\n conv8 = self.conv_8(concat2)\n\n #12x12x246 -> 24x24x128\n up3 = self.up_3(conv8)\n x_conv3 = self.x_conv3(conv3)\n g_conv3 = self.g_conv3(up3)\n Add_3 = tf.keras.layers.add([x_conv3, g_conv3])\n actv_r3 = tf.keras.activations.relu(Add_3)\n conv_r3 = self.conv_r3(actv_r3)\n actv_s3 = tf.keras.activations.sigmoid(conv_r3)\n mult_3 = tf.keras.layers.multiply([conv3, actv_s3])\n concat3 = tf.keras.layers.concatenate([mult_3, up3], axis=3)\n conv9 = self.conv_9(concat3)\n\n #24x24x128 -> 48x48x64\n up4 = self.up_4(conv9)\n x_conv4 = self.x_conv4(conv2)\n g_conv4 = self.g_conv4(up4)\n Add_4 = tf.keras.layers.add([x_conv4, g_conv4])\n actv_r4 = tf.keras.activations.relu(Add_4)\n conv_r4 = self.conv_r4(actv_r4)\n actv_s4 = tf.keras.activations.sigmoid(conv_r4)\n mult_4 = tf.keras.layers.multiply([conv2, actv_s4])\n concat4 = tf.keras.layers.concatenate([mult_4, up4], axis=3)\n conv10 = self.conv_10(concat4)\n\n #48x48x64 -> 96x96x32\n up5 = self.up_5(conv10)\n x_conv5 = self.x_conv5(conv1)\n g_conv5 = self.g_conv5(up5)\n Add_5 = tf.keras.layers.add([x_conv5, g_conv5])\n actv_r5 = tf.keras.activations.relu(Add_5)\n conv_r5 = self.conv_r5(actv_r5)\n actv_s5 = tf.keras.activations.sigmoid(conv_r5)\n mult_5 = tf.keras.layers.multiply([conv1, actv_s5])\n concat5 = tf.keras.layers.concatenate([mult_5, up5], axis=3)\n conv11 = self.conv_11(concat5)\n\n #96x96x32 -> 96x96x1\n last_conv = self.last_conv(conv11) \n\n return tf.keras.layers.Subtract()([input, 
last_conv])","sub_path":"src/lib/models/Res_Attention_UNet.py","file_name":"Res_Attention_UNet.py","file_ext":"py","file_size_in_byte":6603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"168617773","text":"N = int(input())\na = []\nb = list(map(int, input().split()))\n\nfor j in range(N):\n a.append(b[j])\n\ndp = [1]\nfor k in range(1,N):\n count = 0\n clist = [0]\n maxdp = dp[0]\n\n for j in range(k):\n if a[k] > a[j] :\n clist.append(j)\n\n if clist == [0]:\n dp.append(maxdp)\n else:\n for c in clist:\n if dp[c] > maxdp:\n maxdp = dp[c]\n\n dp.append(maxdp + 1) \n\nprint(max(dp))","sub_path":"BoJ/BoJ.11053.py","file_name":"BoJ.11053.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"499401512","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 20 17:24:29 2018\n\n@author: andrew\n\"\"\"\n\nimport combine_swarp\nimport sys\nimport os\nimport initialize\n\ndef COMBINE(path):\n '''Stacks science images into a high *S/N* template frame. Stacking method is the weighted median value of each pixel and is done by the AstrOmatic software ``SWarp`` (E. Bertin). Only the top third of science images with respect to seeing are included in the template.\n \n :param str path: Path of data file tree (contains the **configs**, **data**, **psf**, **residuals**, **sources**, **templates** directories). Use a comma-separated list for mapping to multiple datasets.\n :returns: Weighted median coaddition of all science images is outputted into the **templates** directory with the name convention of *StackMethod_NumberOfImagesInDataset.fits*.\n \n '''\n paths = (path.replace(' ','')).split(',')\n del path\n for path in paths:\n location = path + '/data'\n if os.path.exists(path):\n initialize.create_configs(path)\n combine_swarp.swarp(location)\n else:\n print(\"\\n-> Error: Unknown path entered\\n-> Please enter the path to an existing exposure time directory\\n-> Exiting...\\n\")\n sys.exit()\n\nif __name__ == '__main__':\n path = input(\"\\n-> Enter path to exposure time directory: \")\n COMBINE(path)\n","sub_path":"OasisPy/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"556074632","text":"# -*- coding: utf-8 -*_\n\"\"\"\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nThis script reads information of a group from\na .yaml file and computes its coset table.\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nExample usage:\n\n python coset_enum_example.py filename [-std]\n\n filename: required, the .yaml file to be parsed.\n - std: optional, needs no value, if added then\n the output table is standardized.\n\"\"\"\nimport argparse\nimport yaml\nfrom fpgroup import FpGroup\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filename\", type=str, help=\"Input file name\")\n parser.add_argument(\"-std\", type=bool, default=True, help=\"Standardize the coset table or not\")\n parser.add_argument(\"-out\", metavar=\"-o\", type=str, default=None, help=\"output file name\")\n args = parser.parse_args()\n with open(args.filename, \"r\") as f:\n data = yaml.load(f)\n rels = data[\"relators\"]\n subg = data[\"subgroup-generators\"]\n name = data[\"name\"]\n G = FpGroup(rels, subg, name)\n G.compute(args.std)\n G.print_table(args.out)\n\n\nif __name__ == \"__main__\":\n 
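    # A hypothetical invocation of this coset-table script, using the argparse
    # flags defined above (the file names here are illustrative only):
    #     python tc_example.py group.yaml -out coset_table.txt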
main()\n","sub_path":"src/polytopes/tc_example.py","file_name":"tc_example.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"631556933","text":"import numpy as np\n\nfrom epidemics.country.country import EpidemicsCountry\n\nimport libepidemics #cpp backend\n\n\nclass Object(object):\n pass\n\n\nclass ModelBase(EpidemicsCountry):\n def __init__(self, **kwargs):\n\n super().__init__(**kwargs)\n\n def solve_ode(self, y0, T, t_eval, N, p):\n \n seir_int = libepidemics.country.seir_int\n dp = libepidemics.country.DesignParameters(N=N)\n cppsolver = seir_int.Solver(dp)\n \n beta = p[0]/5.2\n params = seir_int.Parameters(beta=beta, \n gamma=1/5.2, \n a=1./2.9, \n tact=p[1]-5.0, \n dtact=p[2], \n kbeta=p[3])\n\n s0, i0 = y0\n y0cpp = (s0, 0.0, i0, 0.0)\n initial = seir_int.State(y0cpp)\n\n cpp_res = cppsolver.solve_params_ad(params, initial, t_eval=t_eval, dt=0.01)\n\n yS = np.zeros(len(cpp_res))\n gradmu = []\n gradsig = []\n\n for idx, entry in enumerate(cpp_res):\n yS[idx] = entry.S().val()\n\n # Fix bad values\n yS[np.isnan(yS)] = 0\n\n # Create Solution Object\n sol = Object()\n sol.y = yS\n sol.gradMu = gradmu\n sol.gradSig = gradsig\n\n return sol\n","sub_path":"epidemics/country/seir_gui/model_base.py","file_name":"model_base.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"134289492","text":"import serial\r\nfrom dynamixel_sdk.port_handler import *\r\n\r\nPROTOCOL_TX_BUF_SIZE = 50\r\nPROTOCOL_RX_BUF_SIZE = 50\r\nMIGHTYZAP_PING = 0xf1\r\nMIGHTYZAP_READ_DATA = 0xf2\r\nMIGHTYZAP_WRITE_DATA = 0xf3\r\nMIGHTYZAP_REG_WRITE = 0xf4\r\nMIGHTYZAP_ACTION = 0xf5\r\nMIGHTYZAP_RESET = 0xf6\r\nMIGHTYZAP_RESTART = 0xf8\r\nMIGHTYZAP_FACTORY_RESET = 0xf9\r\nMIGHTYZAP_SYNC_WRITE = 0x73\r\n\r\nTxBuffer = [0] * PROTOCOL_TX_BUF_SIZE\r\nTxBuffer_index = 0\r\nRxBuffer = [0] * PROTOCOL_RX_BUF_SIZE\r\nRxBuffer_size = 0\r\n\r\nErollService = 0\r\nErollService_Instruction = 0\r\nErollService_ID = 0x00\r\nErollService_Addr = 0x00\r\nErollService_Size = 0x00\r\nErollService_ModelNum = 0x0000\r\nActuatorID = 0\r\nchecksum = 0\r\nMZap = serial.Serial()\r\n\r\n\r\ndef SetProtocalHeader():\r\n global TxBuffer_index\r\n global TxBuffer\r\n TxBuffer_index = 0\r\n TxBuffer[TxBuffer_index] = 0xff\r\n TxBuffer_index += 1\r\n TxBuffer[TxBuffer_index] = 0xff\r\n TxBuffer_index += 1\r\n TxBuffer[TxBuffer_index] = 0xff\r\n TxBuffer_index += 1\r\n TxBuffer[TxBuffer_index] = ActuatorID\r\n TxBuffer_index += 1\r\n\r\n\r\ndef SetProtocolInstruction(ins):\r\n global TxBuffer_index\r\n global TxBuffer\r\n global ErollService_Instruction\r\n\r\n TxBuffer_index = 5\r\n ErollService_Instruction = ins\r\n TxBuffer[TxBuffer_index] = ins\r\n TxBuffer_index += 1\r\n\r\n\r\ndef AddProtocolFactor(para):\r\n global TxBuffer_index\r\n global TxBuffer\r\n TxBuffer[TxBuffer_index] = para\r\n TxBuffer_index += 1\r\n\r\n\r\ndef SetProtocollength_checksum():\r\n global TxBuffer_index\r\n global TxBuffer\r\n global checksum\r\n checksum = 0\r\n start_i = 0\r\n\r\n TxBuffer[4] = TxBuffer_index - 4\r\n start_i = 3\r\n\r\n for i in range(start_i, TxBuffer_index):\r\n checksum += TxBuffer[i]\r\n TxBuffer[TxBuffer_index] = (checksum & 0x000000ff) ^ 0x000000ff\r\n TxBuffer_index += 1\r\n\r\n\r\ndef getID():\r\n global ActuatorID\r\n return ActuatorID;\r\n\r\n\r\ndef setID(ID):\r\n global ActuatorID\r\n ActuatorID = ID\r\n\r\n\r\ndef MightyZap(ID):\r\n global 
ErollService\r\n global ErollService_Instruction\r\n global ErollService_ID\r\n global ErollService_Addr\r\n global ErollService_Size\r\n global ErollService_ModelNum\r\n\r\n ErollService = 0\r\n ErollService_Instruction = 0\r\n ErollService_ID = 0x00\r\n ErollService_Addr = 0x00\r\n ErollService_Size = 0x00\r\n ErollService_ModelNum = 0x0000\r\n\r\n setID(ID)\r\n\r\n\r\ndef OpenMightyZap(portname, BaudRate):\r\n MZap.port = portname\r\n MZap.baudrate = BaudRate\r\n MZap.timeout = 100\r\n\r\n MZap.open()\r\n\r\n\r\ndef CloseMightyZap():\r\n MZap.close()\r\n\r\n\r\ndef SendPacket(port):\r\n global TxBuffer_index\r\n global TxBuffer\r\n port.clearPort()\r\n port.writePort(TxBuffer[0:TxBuffer_index])\r\n # for i in range(0,TxBuffer_index):\r\n # port.writePort(TxBuffer[i]])\r\n # print(\"Tx\" , TxBuffer[0:TxBuffer_index])\r\n\r\n\r\ndef ReceivePacket(port, ID, size):\r\n global TxBuffer_index\r\n global TxBuffer\r\n global RxBuffer\r\n rxpacket = []\r\n timeout = 0\r\n temp = 0\r\n i = 0\r\n head_count = 0\r\n Time = port.getCurrentTime()\r\n nTime = port.getCurrentTime()\r\n while len(rxpacket) < 3:\r\n rxpacket.extend(port.readPort(1))\r\n # print(3)\r\n if len(rxpacket) > 0:\r\n if rxpacket[len(rxpacket) - 1] == b'\\xff':\r\n RxBuffer[head_count] = 0xff\r\n head_count += 1\r\n else:\r\n RxBuffer[0] = 0\r\n head_count = 0\r\n temp += 1\r\n nTime = port.getCurrentTime() - Time\r\n if nTime > 100:\r\n print(\"Time-out\", rxpacket, ID, nTime)\r\n return -1\r\n for i in range(3, size):\r\n rxpacket.extend(port.readPort(1))\r\n if (i + 1) == len(rxpacket):\r\n RxBuffer[i] = rxpacket[i]\r\n else:\r\n i = len(rxpacket) - 1\r\n # print(\"Rx\", rxpacket)\r\n return 0\r\n\r\n\r\ndef read_data(port, ID, addr, size):\r\n global MIGHTYZAP_READ_DATA\r\n\r\n setID(ID)\r\n SetProtocalHeader()\r\n SetProtocolInstruction(MIGHTYZAP_READ_DATA)\r\n AddProtocolFactor(addr)\r\n AddProtocolFactor(size)\r\n SetProtocollength_checksum()\r\n SendPacket(port)\r\n\r\n\r\ndef read_data_model_num(port, ID):\r\n global MIGHTYZAP_READ_DATA\r\n setID(ID)\r\n SetProtocalHeader()\r\n SetProtocolInstruction(MIGHTYZAP_READ_DATA)\r\n AddProtocolFactor(0)\r\n ErollService_Addr = 0\r\n AddProtocolFactor(2)\r\n ErollService_Size = 2\r\n SetProtocollength_checksum()\r\n SendPacket(port)\r\n\r\n\r\ndef write_data(port, ID, addr, data, size):\r\n global MIGHTYZAP_WRITE_DATA\r\n i = 0\r\n setID(ID)\r\n SetProtocalHeader()\r\n SetProtocolInstruction(MIGHTYZAP_WRITE_DATA)\r\n AddProtocolFactor(addr)\r\n for i in range(0, size):\r\n AddProtocolFactor(data[i])\r\n SetProtocollength_checksum()\r\n SendPacket(port)\r\n\r\n\r\n# Python has no function overloading: the duplicated shorter definitions of\r\n# reg_write/action/reset_write/Restart/factory_reset_write shadowed the full\r\n# ones and called themselves recursively. An optional ID argument defaulting\r\n# to the module-level ActuatorID replaces those duplicated definitions.\r\ndef reg_write(port, addr, data, size, ID=None):\r\n global MIGHTYZAP_REG_WRITE\r\n if ID is None:\r\n ID = ActuatorID\r\n setID(ID)\r\n SetProtocalHeader()\r\n SetProtocolInstruction(MIGHTYZAP_REG_WRITE)\r\n AddProtocolFactor(addr)\r\n for i in range(0, size):\r\n AddProtocolFactor(data[i])\r\n SetProtocollength_checksum()\r\n SendPacket(port)\r\n\r\n\r\ndef action(port, ID=None):\r\n global MIGHTYZAP_ACTION\r\n if ID is None:\r\n ID = ActuatorID\r\n setID(ID)\r\n SetProtocalHeader()\r\n SetProtocolInstruction(MIGHTYZAP_ACTION)\r\n SetProtocollength_checksum()\r\n 
SendPacket(port)\r\n\r\n\r\ndef reset_write(port, option, ID=None):\r\n global MIGHTYZAP_RESET\r\n if ID is None:\r\n ID = ActuatorID\r\n setID(ID)\r\n SetProtocalHeader()\r\n SetProtocolInstruction(MIGHTYZAP_RESET)\r\n AddProtocolFactor(option)\r\n SetProtocollength_checksum()\r\n SendPacket(port)\r\n\r\n\r\ndef Restart(port, ID=None):\r\n global MIGHTYZAP_RESTART\r\n if ID is None:\r\n ID = ActuatorID\r\n setID(ID)\r\n SetProtocalHeader()\r\n SetProtocolInstruction(MIGHTYZAP_RESTART)\r\n SetProtocollength_checksum()\r\n SendPacket(port)\r\n\r\n\r\ndef factory_reset_write(port, option, ID=None):\r\n global MIGHTYZAP_FACTORY_RESET\r\n if ID is None:\r\n ID = ActuatorID\r\n setID(ID)\r\n SetProtocalHeader()\r\n SetProtocolInstruction(MIGHTYZAP_FACTORY_RESET)\r\n AddProtocolFactor(option)\r\n SetProtocollength_checksum()\r\n SendPacket(port)\r\n\r\n\r\ndef ping(port, ID):\r\n global MIGHTYZAP_PING\r\n setID(ID)\r\n SetProtocalHeader()\r\n SetProtocolInstruction(MIGHTYZAP_PING)\r\n SetProtocollength_checksum()\r\n SendPacket(port)\r\n\r\n\r\ndef goalPosition(port, bID, position):\r\n pByte = [0] * 2\r\n pByte[0] = (position & 0x00ff)\r\n pByte[1] = (position >> 8)\r\n write_data(port, bID, 0x86, pByte, 2)\r\n\r\n\r\ndef presentPosition(port, bID):\r\n global RxBuffer\r\n if port.is_using:\r\n return -1\r\n port.is_using = True\r\n read_data(port, bID, 0x8C, 2)\r\n rst = ReceivePacket(port, bID, 9)\r\n if rst == -1:\r\n # print(\"time-out\" , bID)\r\n port.is_using = False\r\n return 0\r\n port.is_using = False\r\n return (RxBuffer[7] * 256) + (RxBuffer[6])\r\n\r\n\r\ndef movingSpeed(port, bID, speed):\r\n pByte = [0] * 2\r\n\r\n pByte[0] = speed & 0x00ff\r\n pByte[1] = (speed >> 8) & 0x00ff\r\n write_data(port, bID, 0x88, pByte, 2)\r\n\r\n\r\ndef forceEnable(port, bID, enable):\r\n pByte = [0] * 2\r\n\r\n if enable == 1:\r\n pByte[0] = 1\r\n else:\r\n pByte[0] = 0\r\n\r\n write_data(port, bID, 0x80, pByte, 1)\r\n\r\n\r\ndef SetErrorShutDownEnable(port, bID, flag):\r\n pByte = [0] * 1\r\n pByte[0] = flag\r\n write_data(port, bID, 0x12, pByte, 1)\r\n\r\n\r\ndef GetErrorShutDownEnable(port, bID):\r\n read_data(port, bID, 0x12, 1)\r\n ReceivePacket(port, bID, 8)\r\n return RxBuffer[6]\r\n\r\n\r\ndef SetErrorIndicatorEnable(port, bID, flag):\r\n pByte = [0] * 1\r\n pByte[0] = flag\r\n write_data(port, bID, 0x11, pByte, 1)\r\n\r\n\r\ndef GetErrorIndicatorEnable(port, bID):\r\n read_data(port, bID, 0x11, 1)\r\n ReceivePacket(port, bID, 8)\r\n return RxBuffer[6]\r\n\r\n\r\ndef ReadError(port, bID):\r\n ping(port, bID)\r\n ReceivePacket(port, bID, 7)\r\n return RxBuffer[5]\r\n\r\n\r\ndef write_Addr(port, bID, addr, size, data):\r\n if size == 2:\r\n pByte = [0] * 2\r\n pByte[0] = (data & 0x00ff)\r\n pByte[1] = (data // 256)\r\n write_data(port, bID, addr, pByte, 2)\r\n else:\r\n pByte = [0] * 1\r\n pByte[0] = data\r\n write_data(port, bID, addr, pByte, 1)\r\n\r\n\r\ndef read_Addr(port, bID, addr, size):\r\n if size == 2:\r\n read_data(port, bID, addr, 2)\r\n ReceivePacket(port, bID, 9)\r\n return (RxBuffer[7] * 256) + RxBuffer[6]\r\n else:\r\n read_data(port, bID, addr, 1)\r\n ReceivePacket(port, bID, 8)\r\n return RxBuffer[6]","sub_path":"device_module/flipper_module/PythonLibMightyZap.py","file_name":"PythonLibMightyZap.py","file_ext":"py","file_size_in_byte":8870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"41408665","text":"# Copyright [2015] Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use 
this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nIntegration tests for Redis datastore.\nAPIs tested for Redis are:\n1. create\n2. restart\n3. resize-volume\n4. resize-instance\n5. delete\n6. cluster-create\n7. cluster-delete\n\"\"\"\n\nfrom proboscis import asserts\nfrom proboscis.decorators import before_class\nfrom proboscis import SkipTest\nfrom proboscis import test\nfrom troveclient.compat import exceptions\n\nfrom trove.common import cfg\nfrom trove.common.utils import poll_until\nfrom trove.tests.api.instances import EPHEMERAL_SUPPORT\nfrom trove.tests.api.instances import GROUP_START_SIMPLE\nfrom trove.tests.api.instances import instance_info\nfrom trove.tests.api.instances import WaitForGuestInstallationToFinish\nfrom trove.tests.config import CONFIG\nfrom trove.tests.util.check import TypeCheck\nfrom trove.tests.util import create_dbaas_client\n\nCONF = cfg.CONF\n\nREDIS_GROUP = \"dbaas.api.redis\"\nTIMEOUT = 2300\nSLEEP_TIME = 60\n\n\n@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[REDIS_GROUP],\n runs_after=[WaitForGuestInstallationToFinish])\nclass RedisTest(object):\n \"\"\"Tests Redis Datastore Features.\"\"\"\n\n @before_class\n def setUp(self):\n self.instance = instance_info\n self.rd_client = create_dbaas_client(self.instance.user)\n self.report = CONFIG.get_report()\n\n def _find_status(self, rd_client, instance_id, expected_status):\n \"\"\"Tracks instance status, until it gets to expected_status.\"\"\"\n instance = rd_client.instances.get(instance_id)\n self.report.log(\"Instance info %s.\" % instance._info)\n if instance.status == expected_status:\n self.report.log(\"Instance: %s is ready.\" % instance_id)\n return True\n else:\n return False\n\n @test\n def test_instance_restart(self):\n \"\"\"Tests the restart API.\"\"\"\n if not getattr(self, 'instance', None):\n raise SkipTest(\n \"Skipping this test since instance is not available.\")\n\n self.rd_client = create_dbaas_client(self.instance.user)\n self.rd_client.instances.restart(self.instance.id)\n\n asserts.assert_equal(202, self.rd_client.last_http_code)\n test_instance = self.rd_client.instances.get(self.instance.id)\n asserts.assert_equal(\"REBOOT\", test_instance.status)\n\n poll_until(lambda: self._find_status(self.rd_client,\n self.instance.id, \"ACTIVE\"),\n sleep_time=SLEEP_TIME, time_out=TIMEOUT)\n self.report.log(\"Restarted Instance: %s.\" % self.instance.id)\n\n @test(depends_on=[test_instance_restart], enabled=False)\n def test_instance_resize_volume(self):\n \"\"\"Tests the resize volume API.\"\"\"\n old_volume_size = int(instance_info.volume['size'])\n new_volume_size = old_volume_size + 1\n if not getattr(self, 'instance', None):\n raise SkipTest(\n \"Skipping this test since instance is not available.\")\n\n self.rd_client = create_dbaas_client(self.instance.user)\n self.rd_client.instances.resize_volume(self.instance.id,\n new_volume_size)\n\n asserts.assert_equal(202, self.rd_client.last_http_code)\n test_instance = self.rd_client.instances.get(self.instance.id)\n asserts.assert_equal(\"RESIZE\", test_instance.status)\n\n poll_until(lambda: 
self._find_status(self.rd_client,\n self.instance.id, \"ACTIVE\"),\n sleep_time=SLEEP_TIME, time_out=TIMEOUT)\n\n instance = self.rd_client.instances.get(self.instance.id)\n asserts.assert_equal(instance.volume['size'], new_volume_size)\n self.report.log(\"Resized Volume for Instance ID: %s to %s.\" % (\n self.instance.id, new_volume_size))\n\n @test(depends_on=[test_instance_resize_volume])\n def test_instance_resize_flavor(self):\n \"\"\"Tests the resize instance/flavor API.\"\"\"\n\n if EPHEMERAL_SUPPORT:\n flavor_name = CONFIG.values.get('instance_bigger_eph_flavor_name',\n 'eph.rd-smaller')\n else:\n flavor_name = CONFIG.values.get('instance_bigger_flavor_name',\n 'm1.small')\n flavors = self.instance.dbaas.find_flavors_by_name(flavor_name)\n new_flavor = flavors[0]\n\n asserts.assert_true(new_flavor is not None,\n \"Flavor '%s' not found!\" % flavor_name)\n\n if not getattr(self, 'instance', None):\n raise SkipTest(\n \"Skipping this test since instance is not available.\")\n\n self.rd_client = create_dbaas_client(self.instance.user)\n self.rd_client.instances.resize_instance(self.instance.id,\n new_flavor.id)\n\n asserts.assert_equal(202, self.rd_client.last_http_code)\n test_instance = self.rd_client.instances.get(self.instance.id)\n asserts.assert_equal(\"RESIZE\", test_instance.status)\n\n poll_until(lambda: self._find_status(self.rd_client,\n self.instance.id, \"ACTIVE\"),\n sleep_time=SLEEP_TIME, time_out=TIMEOUT)\n\n test_instance = self.rd_client.instances.get(self.instance.id)\n asserts.assert_equal(int(test_instance.flavor['id']), new_flavor.id)\n self.report.log(\"Resized Flavor for Instance ID: %s to %s.\" % (\n self.instance.id, new_flavor.id))\n\n @test(depends_on=[test_instance_resize_flavor])\n def test_instance_delete(self):\n \"\"\"Tests the instance delete.\"\"\"\n if not getattr(self, 'instance', None):\n raise SkipTest(\n \"Skipping this test since instance is not available.\")\n\n self.rd_client = create_dbaas_client(self.instance.user)\n self.rd_client.instances.delete(self.instance.id)\n\n asserts.assert_equal(202, self.rd_client.last_http_code)\n test_instance = self.rd_client.instances.get(self.instance.id)\n asserts.assert_equal(\"SHUTDOWN\", test_instance.status)\n\n def _poll():\n try:\n instance = self.rd_client.instances.get(self.instance.id)\n self.report.log(\"Instance info %s\" % instance._info)\n asserts.assert_equal(\"SHUTDOWN\", instance.status)\n return False\n except exceptions.NotFound:\n self.report.log(\"Instance has gone.\")\n asserts.assert_equal(404, self.rd_client.last_http_code)\n return True\n\n poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)\n self.report.log(\"Deleted Instance ID: %s \" % self.instance.id)\n\n @test(depends_on=[test_instance_delete])\n def test_create_cluster_successfuly(self):\n valid_request_body = [{\"flavorRef\": self.instance.dbaas_flavor_href,\n 'volume': {'size': 1}}] * 2\n\n self.cluster = self.rd_client.clusters.create(\n \"test_cluster\", self.instance.dbaas_datastore,\n self.instance.dbaas_datastore_version,\n instances=valid_request_body)\n\n with TypeCheck('Cluster', self.cluster) as check:\n check.has_field(\"id\", basestring)\n check.has_field(\"name\", basestring)\n check.has_field(\"datastore\", dict)\n check.has_field(\"instances\", list)\n check.has_field(\"links\", list)\n check.has_field(\"created\", unicode)\n check.has_field(\"updated\", unicode)\n for instance in self.cluster.instances:\n isinstance(instance, dict)\n asserts.assert_is_not_none(instance['id'])\n 
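                # note: the bare isinstance(instance, dict) call above evaluates to a
                # value that is immediately discarded; only these assert_is_not_none
                # checks actually enforce the shape of each instance entry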
asserts.assert_is_not_none(instance['links'])\n asserts.assert_is_not_none(instance['name'])\n asserts.assert_equal(200, self.rd_client.last_http_code)\n\n def _cluster_is_active(self):\n cluster = self.rd_client.clusters.get(self.cluster.id)\n cluster_instances = [\n self.rd_client.instances.get(instance['id'])\n for instance in cluster.instances]\n self.report.log(\"Cluster info %s.\" % cluster._info)\n self.report.log(\"Cluster instances info %s.\" % cluster_instances)\n if cluster.task['name'] == \"NONE\":\n\n if [\"ERROR\"] * len(cluster_instances) == [\n str(instance.status) for instance in cluster_instances]:\n self.report.log(\"Cluster provisioning failed.\")\n asserts.fail(\"Cluster provisioning failed.\")\n\n if [\"ACTIVE\"] * len(cluster_instances) == [\n str(instance.status) for instance in cluster_instances]:\n self.report.log(\"Cluster is ready.\")\n return True\n else:\n asserts.assert_not_equal(\n [\"ERROR\"] * len(cluster_instances),\n [instance.status\n for instance in cluster_instances])\n self.report.log(\"Continue polling, cluster is not ready yet.\")\n\n @test(depends_on=[test_create_cluster_successfuly])\n def test_wait_until_cluster_is_active(self):\n if not getattr(self, 'cluster', None):\n raise SkipTest(\n \"Skipping this test since cluster is not available.\")\n\n poll_until(self._cluster_is_active,\n sleep_time=SLEEP_TIME, time_out=TIMEOUT)\n self.report.log(\"Created cluster, ID = %s.\" % self.cluster.id)\n\n @test(depends_on=[test_wait_until_cluster_is_active])\n def test_cluster_grow(self):\n\n if not getattr(self, 'cluster', None):\n raise SkipTest(\n \"Skipping this test since cluster is not available.\")\n\n beginning_instance_count = len(self.cluster.instances)\n\n valid_request_body = [\n {\"name\": \"foo\", \"flavorRef\": self.instance.dbaas_flavor_href,\n 'volume': {'size': 1}},\n {\"name\": \"bar\", \"flavorRef\": self.instance.dbaas_flavor_href,\n 'volume': {'size': 1}}]\n\n self.cluster = self.rd_client.clusters.grow(self.cluster.id,\n valid_request_body)\n\n asserts.assert_equal(2, len(self.cluster.instances)\n - beginning_instance_count)\n asserts.assert_equal(202, self.rd_client.last_http_code)\n\n poll_until(self._cluster_is_active,\n sleep_time=SLEEP_TIME, time_out=TIMEOUT)\n\n @test(depends_on=[test_cluster_grow])\n def test_cluster_shrink(self):\n\n if not getattr(self, 'cluster', None):\n raise SkipTest(\n \"Skipping this test since cluster is not available.\")\n\n foo_instance = None\n for instance in self.cluster.instances:\n if instance['name'] == 'foo':\n foo_instance = instance\n break\n asserts.assert_is_not_none(foo_instance, \"Could not find foo instance\")\n\n beginning_instance_count = len(self.cluster.instances)\n\n valid_request_body = [{\"id\": foo_instance['id']}]\n\n self.cluster = self.rd_client.clusters.shrink(self.cluster.id,\n valid_request_body)\n\n asserts.assert_equal(-1, len(self.cluster.instances)\n - beginning_instance_count)\n asserts.assert_equal(202, self.rd_client.last_http_code)\n\n poll_until(self._cluster_is_active,\n sleep_time=SLEEP_TIME, time_out=TIMEOUT)\n\n @test(depends_on=[test_create_cluster_successfuly],\n runs_after=[test_cluster_shrink])\n def test_cluster_delete(self):\n\n if not getattr(self, 'cluster', None):\n raise SkipTest(\n \"Skipping this test since cluster is not available.\")\n\n self.rd_client.clusters.delete(self.cluster.id)\n asserts.assert_equal(202, self.rd_client.last_http_code)\n\n def _poll():\n try:\n cluster = self.rd_client.clusters.get(\n self.cluster.id)\n 
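                # _poll returns False while the GET still succeeds (the cluster is
                # expected to report a DELETING task) and flips to True once the API
                # raises NotFound, i.e. the deletion has completed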
self.report.log(\"Cluster info %s\" % cluster._info)\n asserts.assert_equal(\"DELETING\", cluster.task['name'])\n return False\n except exceptions.NotFound:\n self.report.log(\"Cluster is not available.\")\n asserts.assert_equal(404, self.rd_client.last_http_code)\n return True\n\n poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)\n self.report.log(\"Deleted cluster: %s.\" % self.cluster.id)\n","sub_path":"trove/tests/api/redis.py","file_name":"redis.py","file_ext":"py","file_size_in_byte":13060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"530068213","text":"from django.shortcuts import render\nfrom django.template import Context, RequestContext\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom .models import *\n# Create your views here.\nfrom django.http import HttpResponse\n\nfrom django.db import connection\nimport re\n\ndef index(request):\n return HttpResponse(\"Hello, world.\")\n\n# Insert API\ndef insert(request):\n sys_id=request.GET.get('sys_id')\n machine_name=request.GET.get('machine_name')\n mac_addr=request.GET.get('mac_addr')\n ip_addr=request.GET.get('ip_addr')\n ram=request.GET.get('ram')\n int_name=request.GET.get('int_name')\n int_type=request.GET.get('int_type')\n pwrstatus=request.GET.get('pwrstatus')\n cpu_cores=request.GET.get('cpu_cores')\n cpu_type=request.GET.get('cpu_type')\n storage=request.GET.get('storage')\n w='([a-fA-F0-9]{2}[:|\\-]?){6}'\n op='^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$'\n p = dev_info(sys_id=sys_id,machine_name=machine_name,mac_addr=mac_addr,ram=ram,ip_addr=ip_addr,int_name=int_name,int_type=int_type,pwrstatus=pwrstatus,cpu_cores=cpu_cores,cpu_type=cpu_type,storage=storage,info_type=\"devices\")\n a= re.compile(op).search(ip_addr)\n b=re.compile(w).search(mac_addr)\n if a and b:\n \n try: \n p.save() \n return HttpResponse(\"insert success\")\n except:\n return HttpResponse(\"insert not success\")\n else:\n\n return HttpResponse(\"format may be not correct\")\n\n#Update API\ndef update(request):\n sys_id=request.GET.get('sys_id')\n machine_name=request.GET.get('machine_name')\n mac_addr=request.GET.get('mac_addr')\n ip_addr=request.GET.get('ip_addr')\n ram=request.GET.get('ram')\n int_name=request.GET.get('int_name')\n int_type=request.GET.get('int_type')\n pwrstatus=request.GET.get('pwrstatus')\n cpu_cores=request.GET.get('cpu_cores')\n cpu_type=request.GET.get('cpu_type')\n storage=request.GET.get('storage')\n w='([a-fA-F0-9]{2}[:|\\-]?){6}'\n op='^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$'\n a= re.compile(op).search(ip_addr)\n b=re.compile(w).search(mac_addr)\n dev=\"devices\"\n if a and b:\n\n try:\n with connection.cursor() as cursor:\n cursor.execute(\"\"\"update appserver_dev_info set machine_name=%s,mac_addr=%s,ip_addr=%s,ram=%s,int_name=%s,int_type=%s,pwrstatus=%s,cpu_cores=%s,cpu_type=%s,storage=%s,info_type=%s where sys_id=%s\"\"\",[machine_name,mac_addr,ip_addr,ram,int_name,int_type,pwrstatus,cpu_cores,cpu_type,storage,dev,sys_id] ) \n \n return HttpResponse(\"This is the update api\")\n except:\n return HttpResponse(\"not success\")\n else:\n return HttpResponse(\"check format\")\n#Delete API\ndef delete(request):\n machine_name=request.GET.get('machine_name')\n try:\n with connection.cursor() as cursor:\n cursor.execute(\"\"\"delete from appserver_dev_info where machine_name=%s\"\"\",[machine_name] ) \n \n return HttpResponse(\"This is the delete api\")\n except:\n return HttpResponse(\"not success\")\n\n\n\ndef search(request):\n \n 
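    # runs a parameterized query on machine_name, then pairs every value in each
    # returned row with its column name from cursor.description to build the
    # plain-text response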
machine_name=request.GET.get('machine_name')\n with connection.cursor() as cursor:\n cursor.execute(\"\"\"select * from appserver_dev_info where machine_name= %s \"\"\",[machine_name] ) \n row = cursor.fetchall()\n columns = cursor.description\n q=[]\n q.append([x[0] for x in columns])\n s=\"\"\n f=[]\n x=[]\n for i in row:\n f.append(i)\n for ev in range(len(f)):\n u=0\n for aall in f[ev]:\n ll= str(q[0][u])+\":\"+ str(aall) + \" \" \n x.append(ll)\n u=u+1\n \n \n \n \n return HttpResponse(x)\n \n \n \n\n\ndef callback(request):\n\n verification_code = request.GET.get('verification_code')\n userid = request.GET.get('userid')\n \n\n context = {\n 'verification_code': verification_code,\n 'userid': userid,\n }\n\n return render_to_response('appserver/callback.html', context)\n","sub_path":"anmgr1/Device-Inventory-Alpha/anm/appserver/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"68318923","text":"from __future__ import absolute_import\n\nimport os\nimport json\n\nimport mock\n\nfrom solvebio.cli import main\nfrom .helper import SolveBioTestCase\nfrom solvebio import DatasetTemplate\nfrom solvebio.test.client_mocks import fake_vault_all\nfrom solvebio.test.client_mocks import fake_object_all\nfrom solvebio.test.client_mocks import fake_dataset_create\nfrom solvebio.test.client_mocks import fake_dataset_tmpl_create\nfrom solvebio.test.client_mocks import fake_dataset_tmpl_retrieve\n\n\nclass CLITests(SolveBioTestCase):\n\n def test_whoami(self):\n email, token = main.main(['whoami'])\n self.assertEqual(token, os.environ.get('SOLVEBIO_API_KEY'))\n\n @mock.patch('solvebio.resource.Vault.all')\n @mock.patch('solvebio.resource.Object.all')\n @mock.patch('solvebio.resource.Dataset.create')\n def test_create_dataset(self, DatasetCreate, ObjectAll, VaultAll):\n DatasetCreate.side_effect = fake_dataset_create\n ObjectAll.side_effect = fake_object_all\n VaultAll.side_effect = fake_vault_all\n\n args = ['create-dataset', 'test-dataset',\n '--capacity', 'small',\n '--vault', 'test',\n '--path', '/'] # noqa\n ds = main.main(args)\n self.assertEqual(ds.name, 'test-dataset')\n self.assertEqual(ds.path, '/test-dataset')\n\n def _validate_tmpl_fields(self, fields):\n for f in fields:\n if f.name == 'name':\n self.assertEqual(f.entity_type, 'gene')\n elif f.name == 'variants':\n self.assertEqual(f.entity_type, 'variant')\n self.assertEqual(f.is_list, True)\n self.assertEqual(f.data_type, 'auto')\n elif f.name == 'aliases':\n self.assertEqual(f.data_type, 'string')\n\n @mock.patch('solvebio.resource.Vault.all')\n @mock.patch('solvebio.resource.Object.all')\n @mock.patch('solvebio.resource.Dataset.create')\n @mock.patch('solvebio.resource.DatasetTemplate.create')\n def test_create_dataset_upload_template(self, TmplCreate,\n DatasetCreate, ObjectAll,\n VaultAll):\n TmplCreate.side_effect = fake_dataset_tmpl_create\n DatasetCreate.side_effect = fake_dataset_create\n ObjectAll.side_effect = fake_object_all\n VaultAll.side_effect = fake_vault_all\n\n template_path = os.path.join(os.path.dirname(__file__),\n \"data/template.json\")\n args = ['create-dataset', 'test-dataset',\n '--template-file', template_path,\n '--capacity', 'medium',\n '--vault', 'test',\n '--path', '/'] # noqa\n\n ds = main.main(args)\n self.assertEqual(ds.description,\n 'Created with dataset template: 100')\n\n @mock.patch('solvebio.resource.Vault.all')\n @mock.patch('solvebio.resource.Object.all')\n 
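    # Stacked mock.patch decorators are applied bottom-up, so the bottom-most patch
    # (DatasetTemplate.retrieve) is delivered as the first mock argument of the test
    # method and Vault.all as the last.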
@mock.patch('solvebio.resource.Dataset.create')\n @mock.patch('solvebio.resource.DatasetTemplate.retrieve')\n def test_create_dataset_template_id(self, TmplRetrieve, DatasetCreate,\n ObjectAll, VaultAll):\n VaultAll.side_effect = fake_vault_all\n ObjectAll.side_effect = fake_object_all\n DatasetCreate.side_effect = fake_dataset_create\n TmplRetrieve.side_effect = fake_dataset_tmpl_retrieve\n\n # create template\n template_path = os.path.join(os.path.dirname(__file__),\n \"data/template.json\")\n with open(template_path, 'r') as fp:\n tpl_json = json.load(fp)\n\n tpl = DatasetTemplate.create(**tpl_json)\n args = ['create-dataset', 'test-dataset',\n '--template-id', str(tpl.id),\n '--capacity', 'small',\n '--vault', 'test',\n '--path', '/'] # noqa\n\n ds = main.main(args)\n self.assertEqual(ds.description,\n 'Created with dataset template: {0}'.format(tpl.id))\n","sub_path":"solvebio/test/test_shortcuts.py","file_name":"test_shortcuts.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"89041767","text":"#!/usr/bin/python\n#\n# Copyright 2018-2021 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\n\"\"\"\n Polyaxon SDKs and REST API specification.\n\n Polyaxon SDKs and REST API specification. 
# noqa: E501\n\n The version of the OpenAPI document: 1.11.3\n Contact: contact@polyaxon.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom polyaxon_sdk.configuration import Configuration\n\n\nclass V1Matrix(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'random': 'V1RandomSearch',\n 'grid': 'V1GridSearch',\n 'hyperband': 'V1Hyperband',\n 'bayes': 'V1Bayes',\n 'hyperopt': 'V1Hyperopt',\n 'iterative': 'V1Iterative',\n 'mapping': 'V1Mapping'\n }\n\n attribute_map = {\n 'random': 'random',\n 'grid': 'grid',\n 'hyperband': 'hyperband',\n 'bayes': 'bayes',\n 'hyperopt': 'hyperopt',\n 'iterative': 'iterative',\n 'mapping': 'mapping'\n }\n\n def __init__(self, random=None, grid=None, hyperband=None, bayes=None, hyperopt=None, iterative=None, mapping=None, local_vars_configuration=None): # noqa: E501\n \"\"\"V1Matrix - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._random = None\n self._grid = None\n self._hyperband = None\n self._bayes = None\n self._hyperopt = None\n self._iterative = None\n self._mapping = None\n self.discriminator = None\n\n if random is not None:\n self.random = random\n if grid is not None:\n self.grid = grid\n if hyperband is not None:\n self.hyperband = hyperband\n if bayes is not None:\n self.bayes = bayes\n if hyperopt is not None:\n self.hyperopt = hyperopt\n if iterative is not None:\n self.iterative = iterative\n if mapping is not None:\n self.mapping = mapping\n\n @property\n def random(self):\n \"\"\"Gets the random of this V1Matrix. # noqa: E501\n\n\n :return: The random of this V1Matrix. # noqa: E501\n :rtype: V1RandomSearch\n \"\"\"\n return self._random\n\n @random.setter\n def random(self, random):\n \"\"\"Sets the random of this V1Matrix.\n\n\n :param random: The random of this V1Matrix. # noqa: E501\n :type: V1RandomSearch\n \"\"\"\n\n self._random = random\n\n @property\n def grid(self):\n \"\"\"Gets the grid of this V1Matrix. # noqa: E501\n\n\n :return: The grid of this V1Matrix. # noqa: E501\n :rtype: V1GridSearch\n \"\"\"\n return self._grid\n\n @grid.setter\n def grid(self, grid):\n \"\"\"Sets the grid of this V1Matrix.\n\n\n :param grid: The grid of this V1Matrix. # noqa: E501\n :type: V1GridSearch\n \"\"\"\n\n self._grid = grid\n\n @property\n def hyperband(self):\n \"\"\"Gets the hyperband of this V1Matrix. # noqa: E501\n\n\n :return: The hyperband of this V1Matrix. # noqa: E501\n :rtype: V1Hyperband\n \"\"\"\n return self._hyperband\n\n @hyperband.setter\n def hyperband(self, hyperband):\n \"\"\"Sets the hyperband of this V1Matrix.\n\n\n :param hyperband: The hyperband of this V1Matrix. # noqa: E501\n :type: V1Hyperband\n \"\"\"\n\n self._hyperband = hyperband\n\n @property\n def bayes(self):\n \"\"\"Gets the bayes of this V1Matrix. # noqa: E501\n\n\n :return: The bayes of this V1Matrix. # noqa: E501\n :rtype: V1Bayes\n \"\"\"\n return self._bayes\n\n @bayes.setter\n def bayes(self, bayes):\n \"\"\"Sets the bayes of this V1Matrix.\n\n\n :param bayes: The bayes of this V1Matrix. 
# noqa: E501\n :type: V1Bayes\n \"\"\"\n\n self._bayes = bayes\n\n @property\n def hyperopt(self):\n \"\"\"Gets the hyperopt of this V1Matrix. # noqa: E501\n\n\n :return: The hyperopt of this V1Matrix. # noqa: E501\n :rtype: V1Hyperopt\n \"\"\"\n return self._hyperopt\n\n @hyperopt.setter\n def hyperopt(self, hyperopt):\n \"\"\"Sets the hyperopt of this V1Matrix.\n\n\n :param hyperopt: The hyperopt of this V1Matrix. # noqa: E501\n :type: V1Hyperopt\n \"\"\"\n\n self._hyperopt = hyperopt\n\n @property\n def iterative(self):\n \"\"\"Gets the iterative of this V1Matrix. # noqa: E501\n\n\n :return: The iterative of this V1Matrix. # noqa: E501\n :rtype: V1Iterative\n \"\"\"\n return self._iterative\n\n @iterative.setter\n def iterative(self, iterative):\n \"\"\"Sets the iterative of this V1Matrix.\n\n\n :param iterative: The iterative of this V1Matrix. # noqa: E501\n :type: V1Iterative\n \"\"\"\n\n self._iterative = iterative\n\n @property\n def mapping(self):\n \"\"\"Gets the mapping of this V1Matrix. # noqa: E501\n\n\n :return: The mapping of this V1Matrix. # noqa: E501\n :rtype: V1Mapping\n \"\"\"\n return self._mapping\n\n @mapping.setter\n def mapping(self, mapping):\n \"\"\"Sets the mapping of this V1Matrix.\n\n\n :param mapping: The mapping of this V1Matrix. # noqa: E501\n :type: V1Mapping\n \"\"\"\n\n self._mapping = mapping\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, V1Matrix):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, V1Matrix):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"sdks/python/http_client/v1/polyaxon_sdk/models/v1_matrix.py","file_name":"v1_matrix.py","file_ext":"py","file_size_in_byte":7755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"121236218","text":"##BTC\nclass Strategy():\n # option setting needed\n def __setitem__(self, key, value):\n self.options[key] = value\n\n # option setting needed\n def __getitem__(self, key):\n return self.options.get(key, '')\n\n def __init__(self):\n # strategy property\n self.subscribedBooks = {\n 'Bitfinex': {\n 'pairs': ['BTC-USDT'],\n },\n }\n self.first = 0\n self.period = 10 * 60\n self.options = {}\n\n # user defined class attribute\n self.last_type = 'sell'\n self.last_cross_status = None\n self.close_price_trace = np.array([])\n self.ma_long = 10\n self.ma_short = 5\n self.UP = 1\n self.DOWN = 2\n self.stock_base = 12\n\n def on_order_state_change(self, order):\n Log(\"on order state change message: \" + str(order) + \" order price: \" + str(order[\"price\"]))\n\n def 
get_current_ma_cross(self):
        s_ma = talib.SMA(self.close_price_trace, self.ma_short)[-1]
        l_ma = talib.SMA(self.close_price_trace, self.ma_long)[-1]
        if np.isnan(s_ma) or np.isnan(l_ma):
            return None
        if s_ma > l_ma:
            return self.UP
        return self.DOWN


    # called every self.period
    def trade(self, information):
        exchange = list(information['candles'])[0]
        pair = list(information['candles'][exchange])[0]
        target_currency = pair.split('-')[0] #BTC
        base_currency = pair.split('-')[1] #USDT
        base_currency_amount = self['assets'][exchange][base_currency]
        target_currency_amount = self['assets'][exchange][target_currency]
        # add latest price into trace
        close_price = information['candles'][exchange][pair][0]['close']
        high_price = information['candles'][exchange][pair][0]['high']
        self.close_price_trace = np.append(self.close_price_trace, [float(close_price)])
        # only keep max length of ma_long count elements
        self.close_price_trace = self.close_price_trace[-self.ma_long:]
        # calculate current ma cross status
        cur_cross = self.get_current_ma_cross()
        if cur_cross is None:
            return []
        if self.last_cross_status is None:
            self.last_cross_status = cur_cross
            return []
        # first tradable tick: open a small initial market position once
        if self.first == 0:
            self.first = 1  # mark the initial buy as done so it is not repeated every period
            Log('buying 0.5 unit of ' + str(target_currency))
            self.last_type = 'buy'
            self.last_cross_status = cur_cross
            return [
                {
                    'exchange': exchange,
                    'amount': 0.5,
                    'price': -1,
                    'type': 'MARKET',
                    'pair': pair,
                }
            ]
        # buy zone: price has fallen below the lower threshold
        if close_price < 35500:
            Log('buying ' + str(target_currency))
            self.last_type = 'buy'
            self.last_cross_status = cur_cross
            money = self['assets'][exchange]['USDT']
            if close_price < 34000:
                money_amount = money / close_price
            else:
                money_amount = self.stock_base * (close_price-34000) / 34000
            return [
                {
                    'exchange': exchange,
                    'amount': money_amount,
                    'price': 34000,
                    'type': 'LIMIT',
                    'pair': pair,
                }
            ]
        # sell zone: price has risen above the upper threshold
        elif close_price > 37000:
            Log('assets before selling: ' + str(self['assets'][exchange][base_currency]))
            self.last_type = 'sell'
            self.last_cross_status = cur_cross
            if close_price > 40000 :
                money_amount = target_currency_amount
            else:
                money_amount = self.stock_base * (close_price-37000) / 37000
            return [
                {
                    'exchange': exchange,
                    'amount': -money_amount,
                    'price': 37000,
                    'type': 'LIMIT',
                    'pair': pair,
                }
            ]
        # 15% stop loss: sell everything if the close falls 15% below the candle high
        if close_price < high_price * 0.85 and target_currency_amount > 0.0:
            Log('assets before selling: ' + str(self['assets'][exchange][base_currency]))
            Log('selling, ' + exchange + ':' + pair)
            self.last_type = 'sell'

            sell = target_currency_amount
            self.last_price = close_price
            # if sell > BTC_amount:
            #     return []

            return [
                {
                    'exchange': exchange,
                    'amount': -sell,
                    'price': -1,
                    'type': 'MARKET',
                    'pair': pair,
                }
            ]
        self.last_cross_status = cur_cross
        return []
","sub_path":"BTC.py","file_name":"BTC.py","file_ext":"py","file_size_in_byte":4879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"136934223","text":"import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import csv

#hhids=[86, 59, 77, 26, 93, 101, 114, 171, 1086, 1403]
hhids=['1202', '871', '1103', '585', '59', '2755', '2233', '86', '114', '2575', '1700', '974', '1800',
 '370', '187', '1169', '1718', '545', '94', '2018', '744', '2859', '2925', '484', '2953', '171', '2818', '1953',
 '1697', '1463', '499', '1790', '1507', '1642', '93', '1632',
 '1500', '2472', '2072', '2378', 
'1415', '2986', '1403', '2945', '77', '1792',\n '624', '379', '2557', '890', '1192', '26', '2787', '2965', '2980', '434', '2829',\n '503', '2532', '946', '2401', '1801','2337','1086','1714','1283','252','2814']\n\nscenarios=[\"sb4b64\",\"sb4b135\",\"sb8b64\",\"sb8b135\",\"sb10b64\",\"sb10b135\",\"sb20b64\",\"sb20b135\"]\n#compile\nfor j in scenarios:\n nos_list=[]\n nob_list=[]\n nob_fg_list=[]\n nob_tg_list=[]\n rbc_list=[]\n rbc_fg_list=[]\n rbc_tg_list=[]\n mpc_list=[]\n mpc_fg_list=[]\n mpc_tg_list=[]\n\n print(\"In scenarios\",j)\n csvfile = open('roi_table_all/panel4{}.csv'.format(j),'w', newline='')\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow([\"Home\",\"no_solar\",\"no_battery\",\"NoB FG\",\"NoB TG\",\"RBC\",\"RBC FG\",\"RBC TG\",\"MPC\",\"MPC FG\",\"MPC TG\"])\n for i in hhids:\n #no solar no battery\n #table = pd.read_csv('nopv_4/{}_2_nopv/{}.csv'.format(i,j))\n if j[2]=='4':\n table = pd.read_csv('nopv_4/{}_4_nopv/sb4b0.csv'.format(i))\n if j[2]=='8':\n table = pd.read_csv('nopv_4/{}_4_nopv/sb8b0.csv'.format(i))\n if j[2]=='1':\n table = pd.read_csv('nopv_4/{}_4_nopv/sb10b0.csv'.format(i))\n if j[2]=='2':\n table = pd.read_csv('nopv_4/{}_4_nopv/sb20b0.csv'.format(i))\n nos = table['Base_Bill'].iloc[8735]-table['Base_Bill'].iloc[167]\n print(\"no solar bill: \",nos)\n nos_list.append(nos)\n\n #solar no battery\n if j[2]=='4':\n table = pd.read_csv('nostorage_4/{}_4_nostorage/sb4b0.csv'.format(i))\n if j[2]=='8':\n table = pd.read_csv('nostorage_4/{}_4_nostorage/sb8b0.csv'.format(i))\n if j[2]=='1':\n table = pd.read_csv('nostorage_4/{}_4_nostorage/sb10b0.csv'.format(i))\n if j[2]=='2':\n table = pd.read_csv('nostorage_4/{}_4_nostorage/sb20b0.csv'.format(i))\n nob = table['Base_Bill'].iloc[8735]-table['Base_Bill'].iloc[167]\n print(\"no battery bill: \",nob)\n nob_list.append(nob)\n nob_fg = nos\n nob_fg_list.append(nob_fg)\n nob_tg = -1*(nob_fg-nob)\n nob_tg_list.append(nob_tg)\n\n\n #Baseline bill\n table = pd.read_csv('rbc_4_bill/{}_4_rbc_bill/{}.csv'.format(i,j))\n rbc = table['Bill'].iloc[-1]\n rbc_fg = table['Total FG bill'].iloc[-1]\n rbc_tg = table['Total TG bill'].iloc[-1]\n print(\" bill: \",rbc)\n rbc_list.append(rbc)\n rbc_fg_list.append(rbc_fg)\n rbc_tg_list.append(rbc_tg)\n\n\n\n #MPC\n table = pd.read_csv('mpc_4_par/{}_4_mpc_par/{}.csv'.format(i,j))\n mpc = table['Bill'].iloc[-1]\n mpc_fg = table['Total FG bill'].iloc[-1]\n mpc_tg = table['Total TG bill'].iloc[-1]\n print(\"mpc bill: \",mpc)\n mpc_list.append(mpc)\n mpc_fg_list.append(mpc_fg)\n mpc_tg_list.append(mpc_tg)\n print(\"\\n\")\n\n writer.writerow([i,nos,nob,nob_fg,nob_tg,rbc,rbc_fg,rbc_tg,mpc,mpc_fg,mpc_tg])\n writer.writerow([\"mean\",np.mean(nos_list),np.mean(nob_list),np.mean(nob_fg_list),np.mean(nob_tg_list),np.mean(rbc_list),np.mean(rbc_fg_list),np.mean(rbc_tg_list),np.mean(mpc_list),np.mean(mpc_fg_list),np.mean(mpc_tg_list)])\n csvfile.close()\n","sub_path":"Plot/bill_table4.py","file_name":"bill_table4.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"154282350","text":"import random\nimport sys\nimport copy\nsys.path.append(\"..\")\nfrom utils import Action\n\nclass Block_card_user(object) :\n def __init__(self,\n templates=None,\n list_of_user_profiles=None,\n user_values=None,\n turn_compression=False,\n new_api=False,\n another_slot=False,\n audit_more=False) :\n \n # Below is the available pool of values from which we will create a Custom user for the transaction\n 
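        # the pools below come from the user_values dict supplied by the caller,
        # which must provide the 'user_accounts' and 'card_names' keys used in
        # the assignments that follow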
#self.user_names = [\"Sourabh\",\"Serra\",\"Simone\",\"Marco\",\"Vevake\",\"Matteo\",\"Tahir\",\"Samuel\"]\n self.user_accounts = user_values[\"user_accounts\"]\n self.user_card_names = user_values[\"card_names\"]\n self.slots = [\"card_id\"]\n self.templates = templates\n \n self.priority_states = list()\n self.priority_actions = dict()\n \n self.turn_compression = turn_compression\n self.new_api = new_api\n self.another_slot = another_slot\n self.audit_more = audit_more\n \n self.override = False\n self.state_track = dict()\n self.state_track[\"CHANGE_CARD_ID\"] = 0\n self.state_track[\"CHANGE_CARD_NAME\"] = 0\n self.state_track[\"CHANGE_ACCOUNT\"] = 0\n \n # create the custom user\n self.user = dict()\n \n row_chosen = random.randint(0,len(list_of_user_profiles)-1)\n user_chosen = list_of_user_profiles[row_chosen]\n \n self.create_user_profile(user_chosen)\n \n def sort_my_slots(self,slots_given) :\n slots_sorted = list()\n \n if \"card_id\" in slots_given :\n slots_sorted.append(\"card_id\")\n slots_given.remove(\"card_id\")\n \n \n for slot in slots_given :\n slots_sorted.append(slot)\n \n return slots_sorted\n \n def create_user_profile(self,user_chosen) :\n \n # Every value is assigned randomly \n \n # selectinng name of sender and reciever\n \n self.user[\"name\"] = user_chosen[\"name\"]\n \n #selecting the usr_account to make the transaction from\n \n \n # select at random the number of account the user has.\n #number_of_account = random.randint(1,len(self.user_accounts))\n \n #self.user[\"user_accounts\"] = random.sample(self.user_accounts,number_of_account)\n #self.user[\"user_accounts\"].sort()\n \n \n \n self.user[\"user_accounts\"] = user_chosen[\"user_accounts\"].strip().split(',')\n \n # select a list of accounts from the given sample\n \n self.user[\"user_account\"] = random.sample(self.user_accounts,1)[0]\n \n # creating a card id for the user\n r_account = random.sample(self.user_accounts,1)[0]\n r_card_name = random.sample(self.user_card_names,1)[0]\n \n self.user[\"card_id\"] = \"{}-{}\".format(r_account,r_card_name)\n self.user[\"card_names\"] = self.user_card_names\n self.user[\"card_name\"] = random.sample(self.user_card_names,1)[0]\n \n \n #number_of_accounts_with_cards = random.randint(0,len(self.user_accounts))\n \n self.user_account_card_name_pair = dict()\n self.user_account_with_cards = list()\n \n self.user[\"card_ids\"] = user_chosen[\"card_ids\"].strip().split(',')\n \n for card_id in self.user[\"card_ids\"] :\n card_name, linked_account = card_id.split('-')\n if linked_account not in self.user_account_with_cards :\n self.user_account_with_cards.append(linked_account)\n \n if linked_account in self.user_account_card_name_pair.keys() :\n list_of_cards = self.user_account_card_name_pair[linked_account]\n else :\n list_of_cards = list()\n \n list_of_cards.append(card_name)\n self.user_account_card_name_pair[linked_account] = list_of_cards\n \n \n \n \n # setting up the intent\n self.user[\"intent\"] = \"block_card\"\n self.user[\"domain_description\"] = \"block_card_memory_network\"\n \n # Returns the respective value of the slot\n \n def remove_slot(self,slot_given) :\n if slot_given in self.slots :\n self.slots.remove(slot_given)\n \n def get_value(self,slot_asked) :\n \n return self.user[slot_asked]\n \n # This function is called when the bot has made a request but no slots have been provided, hence we look at the description of the action to figure out what the request is\n def perform_random_action(self,bot_action) :\n \n user_action = None\n actual_actor 
= None\n actual_action = None\n accept_message = str()\n reject_message = str()\n values_to_give = dict()\n pattern_to_give = list()\n \n if bot_action.get_description() == \"SELECT_ACCOUNT\" :\n \n self.user[\"user_account\"] = random.sample(self.user_accounts,1)[0]\n \n user_action = Action(actor=\"User\",\n action=\"inform\",\n slots=[\"user_account\"],\n values={\"user_account\" : self.user[\"user_account\"]},\n message=\"providing value for user_account\",\n templates=self.templates)\n \n elif bot_action.get_description() == \"SELECT_CARD\" :\n \n user_action = Action(actor=\"User\",\n action=\"inform\",\n slots=[\"card_name\"],\n values={\"card_name\" : self.user[\"card_name\"]},\n message=\"providing the card name to the bot\",\n templates=self.templates)\n \n else :\n \n if bot_action.get_description() == \"API_CALL\" :\n \n actual_actor = \"API\"\n actual_action = \"api_response\"\n accept_message = \"api_response:block_card_api, api_result:success\"\n reject_message = \"api_response:block_card_api, api_result:failed\"\n \n \n elif bot_action.get_description() == \"CHANGE_ACCOUNT\" :\n \n \n \n new_account = random.sample(self.user_accounts,1)[0]\n \n while new_account == self.user[\"user_account\"] :\n new_account = random.sample(self.user_accounts,1)[0]\n \n self.user[\"user_account\"] = new_account\n \n if self.state_track[\"CHANGE_ACCOUNT\"] > 2 :\n self.override = True\n new_account = random.sample(self.user[\"user_accounts\"],1)[0]\n self.user[\"user_account\"] = new_account\n \n actual_actor = \"User\"\n actual_action = \"inform\"\n accept_message = \"accept\"\n reject_message = \"reject\"\n \n if self.turn_compression :\n accept_message = \"accept use {}\".format(new_account)\n pattern_to_give.append(\"turn_compression\")\n \n values_to_give = {\"user_account\" : new_account}\n self.state_track[\"CHANGE_ACCOUNT\"] += 1\n \n elif bot_action.get_description() == \"CHANGE_CARD_NAME\" :\n \n \n new_card_name = random.sample(self.user_card_names,1)[0]\n \n while new_card_name == self.user[\"card_name\"] :\n new_card_name = random.sample(self.user_card_names,1)[0]\n \n self.user[\"card_name\"] = new_card_name\n if self.state_track[\"CHANGE_CARD_NAME\"] > 2 :\n self.override = True\n new_card_name = random.sample(self.user[\"card_names\"],1)[0]\n self.user[\"card_name\"] = new_card_name\n \n actual_actor = \"User\"\n actual_action = \"inform\"\n accept_message = \"accept\"\n reject_message = \"reject\"\n \n if self.turn_compression :\n accept_message = \"accept use {}\".format(new_card_name)\n pattern_to_give.append(\"turn_compression\")\n \n values_to_give = {\"card_name\" : new_card_name}\n \n self.state_track[\"CHANGE_CARD_NAME\"] += 1\n \n elif bot_action.get_description() == \"CHANGE_CARD_ID\" :\n \n r_account = random.sample(self.user_accounts,1)[0]\n r_card_name = random.sample(self.user_card_names,1)[0]\n \n new_card_id = \"{}-{}\".format(r_account,r_card_name)\n \n while new_card_id == self.user[\"card_id\"] :\n r_account = random.sample(self.user_accounts,1)[0]\n r_card_name = random.sample(self.user_card_names,1)[0]\n \n new_card_id = \"{}-{}\".format(r_account,r_card_name)\n \n if self.state_track[\"CHANGE_CARD_ID\"] > 2 :\n self.override = True\n \n new_card_id = random.sample(self.user[\"card_ids\"],1)[0]\n self.user[\"card_id\"] = new_card_id\n #print(\"new card id chosen is : {}\".format(new_card_id))\n \n actual_actor = \"User\"\n actual_action = \"inform\"\n accept_message = \"accept\"\n reject_message = \"reject\"\n \n if self.turn_compression :\n 
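                    # turn compression: fold the freshly chosen card id into the
                    # accept message so the confirmation and the new value arrive
                    # in a single user turn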
accept_message = \"accept use {}\".format(new_card_id)\n pattern_to_give.append(\"turn_compression\")\n \n values_to_give = {\"card_id\" : new_card_id}\n self.state_track[\"CHANGE_CARD_ID\"] += 1\n else :\n \n actual_actor = \"User\"\n actual_action = \"inform\"\n accept_message = \"accept\"\n reject_message = \"reject\"\n \n toss = random.randint(0,100)\n if toss > 10 or self.override :\n self.override = False\n user_action = Action(actor=actual_actor,\n action=actual_action,\n slots=None,\n values=values_to_give,\n message=accept_message,\n templates=self.templates,\n pattern_marker=pattern_to_give)\n else :\n \n user_action = Action(actor=actual_actor,\n action=actual_action,\n slots=None,\n values=values_to_give,\n message=reject_message,\n templates=self.templates)\n return user_action\n # This is the function that converses with the bot through 'Action' Objects\n def speak(self,bot_action) :\n user_action = None\n if bot_action.get_action() == \"api_call\" :\n \n user_action = self.api_response(bot_action) \n\n elif bot_action.get_action() == \"request\" :\n \n if bot_action.get_slots() != None :\n \n if bot_action.get_slots()[0] != \"intent\" :\n \n if \"card_id\" in bot_action.get_slots() :\n toss = random.randint(0,100)\n \n if toss > 20 :\n user_value = self.get_value(\"card_id\")\n user_action = Action(actor=\"User\",\n action=\"inform\",\n slots=[\"card_id\"],\n values={\"card_id\" : user_value},\n message=\"providing value for card_id\",\n templates=self.templates)\n else :\n \n user_action = Action(actor=\"User\",\n action=\"card_id_not_know\",\n slots=None,\n values={\"user_account\" : self.user[\"user_account\"]},\n message=\"Providing value for {}\".format(bot_action.get_slots()[0]),\n description=\"CARD_ID_NOT_KNOW\",\n templates=self.templates)\n else :\n \n slot_to_inform = bot_action.get_slots()[0]\n \n if self.another_slot and self.slots :\n slots_to_choose_from = copy.deepcopy(self.slots)\n if len(slots_to_choose_from) > 1 :\n self.remove_slot(slot_to_inform)\n #slots_to_choose_from.remove(slot_to_inform)\n \n slot_chosen_to_inform = random.sample(slots_to_choose_from,1)[0]\n value_for_other_slot = self.get_value(slot_chosen_to_inform)\n \n user_action = Action(actor=\"User\",\n action=\"inform\",\n slots=[slot_chosen_to_inform],\n values={slot_chosen_to_inform : value_for_other_slot},\n message=\"Providing value for {}\".format(slot_chosen_to_inform),\n description=\"ANOTHER_SLOT_VALUE\",\n templates=self.templates)\n \n self.remove_slot(slot_chosen_to_inform)\n #self.slots.remove(slot_chosen_to_inform)\n else :\n \n user_value = self.get_value(slot_to_inform)\n \n user_action = Action(actor=\"User\",\n action=\"inform\",\n slots=bot_action.get_slots(),\n values={bot_action.get_slots()[0] : user_value},\n message=\"Providing value for {}\".format(bot_action.get_slots()[0]),\n slot_concerned=bot_action.get_slots()[0],\n templates=self.templates)\n \n self.remove_slot(slot_to_inform)\n #self.slots.remove(slot_to_inform)\n \n else :\n \n rem = 0\n pattern_to_give = list()\n if self.new_api :\n rem = 1\n pattern_to_give.append(\"new_api\")\n \n number_of_slots = random.randint(0,len(self.slots))\n \n while number_of_slots %2 != rem :\n number_of_slots = random.randint(0,len(self.slots))\n \n slots_to_inform = random.sample(self.slots,number_of_slots)\n all_slots = [\"intent\",\"domain_description\"] + self.sort_my_slots(slots_to_inform)\n values_to_inform = dict()\n \n for slot in all_slots :\n values_to_inform[slot] = self.user[slot]\n \n values_to_inform[\"name\"] = 
self.user[\"name\"]\n \n user_action = Action(actor=\"User\",\n action=\"inform\",\n slots=all_slots,\n values=values_to_inform,\n message=\"Providing value for intent\",\n templates=self.templates,\n pattern_marker=pattern_to_give)\n else:\n \n user_action = self.perform_random_action(bot_action)\n \n \n else :\n \n if bot_action.get_action() != None :\n user_action = self.perform_random_action(bot_action)\n else :\n user_action = Action(actor=\"User\",\n action=None,\n slots=None,\n values=None,\n message=\"\",\n templates=self.templates)\n \n return user_action\n \n # when the bot takes the role of API then, the User should assume the role of API_RESP (i.e API_RESPONSE)\n def api_response(self,bot_action) :\n \n user_action = None\n \n # if the API action asks for a account check\n if bot_action.get_description() == \"REQUEST_ACCOUNTS\" :\n \n slot_message = \",\".join(self.user[\"user_accounts\"])\n bot_message = \"api_response:request_accounts_api, list_of_accounts:{}\".format(slot_message)\n user_action = Action(actor=\"API\",\n action=None,\n slots = self.user[\"user_accounts\"],\n values=None,\n message=bot_message,\n description=\"LIST_OF_SLOTS\",\n templates=self.templates)\n \n elif bot_action.get_description() == \"API_INITIAL_SLOT_CHECK\" :\n flag = False\n error_message = list()\n order_of_slots = list()\n if \"card_id\" in bot_action.get_slots() and self.user[\"card_id\"] not in self.user[\"card_ids\"] :\n \n self.priority_states.append(\"change_card_id\")\n order_of_slots.append(\"change_card_id\")\n slot_message = \",\".join(self.user[\"user_accounts\"])\n bot_message = \"It seems that you have not entered a valid account, you available accounts are {}, would you like change the source account ?\".format(slot_message)\n self.priority_actions[\"change_card_id\"] = Action(actor=\"Bot\",\n action=\"request\",\n slots=None,\n values=None,\n message=bot_message,\n description=\"CHANGE_ACCOUNT\",\n templates=self.templates)\n \n if self.priority_states :\n order_message = ','.join(order_of_slots)\n user_action = Action(actor=\"API\",\n action=None,\n slots=self.priority_states,\n values=self.priority_actions,\n message=\"api_response:initial_slot_check_api, api_result:failed, message:'{}'\".format(order_message),\n templates=self.templates)\n else :\n user_action = Action(actor=\"API\",\n action=\"api_response\",\n slots=bot_action.get_slots(),\n values=None,\n message=\"api_response:initial_slot_check_api, api_result:success\",\n templates=self.templates)\n \n elif bot_action.get_description() == \"API_CARD_ID_CHECK\" :\n \n #print(\"card ids allowed : {}\".format(self.user[\"card_id\"]))\n if self.user[\"card_id\"] in self.user[\"card_ids\"] :\n \n user_action = Action(actor=\"API\",\n action=\"api_response\",\n slots=[\"card_id\"],\n values=None,\n message=\"api_response:check_card_id_api, api_result:success\",\n templates=self.templates)\n else :\n \n user_action = Action(actor=\"API\",\n action=\"api_response\",\n slots=[\"card_id\"],\n values=None,\n message=\"api_response:check_card_id_api, api_result:failed, message:'change_card_id'\",\n templates=self.templates)\n \n elif bot_action.get_description() == \"API_ACCOUNT_CHECK\" :\n \n #print(\"checking account\")\n if self.user[\"user_account\"] in self.user_account_with_cards :\n \n if self.user[\"user_account\"] in self.user[\"user_accounts\"] :\n \n user_action = Action(actor=\"API\",\n action=\"api_response\",\n slots=[\"user_account\"],\n values={\"card_names\" : self.user[\"card_names\"]},\n 
message=\"api_response:account_check_api, api_result:success\",
                                         templates=self.templates)
                else :

                    slot_message = ','.join(self.user_account_with_cards)
                    api_message = \"api_response:account_check_api, api_result:failed, message:'available list of accounts : {}'\".format(slot_message)
                    user_action = Action(actor=\"API\",
                                         action=\"api_response\",
                                         slots=self.user_account_with_cards,
                                         values={\"user_accounts\" : self.user_account_with_cards},
                                         message=api_message,
                                         description=\"NO_CARD_FOR_USER_ACCOUNT\",
                                         templates=self.templates)

            else :

                slot_message = ','.join(self.user[\"user_accounts\"])
                api_message = \"api_response:account_check_api, api_result:failed, message:'available list of accounts : {}'\".format(slot_message)
                user_action = Action(actor=\"API\",
                                     action=\"api_response\",
                                     slots=self.user[\"user_accounts\"],
                                     values={\"user_accounts\" : self.user[\"user_accounts\"]},
                                     message=api_message,
                                     description=\"NO_USER_ACCOUNT\",
                                     templates=self.templates)

        elif bot_action.get_description() == \"API_CARD_NAME_CHECK\" :

            if self.user[\"user_account\"] in self.user_account_with_cards and self.user[\"card_name\"] in self.user_account_card_name_pair[self.user[\"user_account\"]] :

                card_id = \"{}-{}\".format(self.user[\"user_account\"],self.user[\"card_name\"])
                user_action = Action(actor=\"API\",
                                     action=\"api_response\",
                                     slots=[\"card_id\"],
                                     values={\"card_id\" : card_id},
                                     message=\"api_response:card_name_check_api, api_result:success\",
                                     templates=self.templates)
            else :

                slot_message = ','.join(self.user_account_card_name_pair[self.user[\"user_account\"]])
                user_action = Action(actor=\"API\",
                                     action=\"api_response\",
                                     slots=self.user_account_card_name_pair[self.user[\"user_account\"]],
                                     values={\"card_names\" : self.user_account_card_name_pair[self.user[\"user_account\"]]},
                                     message=\"api_response:card_name_api, api_result:failed, message:'available list of cards is : {}'\".format(slot_message),
                                     templates=self.templates)

        else :

            user_action = self.perform_random_action(bot_action)

        return user_action ","sub_path":"Dialog_Generator/User/.ipynb_checkpoints/block_card_user-checkpoint.py","file_name":"block_card_user-checkpoint.py","file_ext":"py","file_size_in_byte":25737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"316847565","text":"import urllib.request as req
import os.path, random
import json

url = \"https://api.aoikujira.com/hyakunin/get.php?fmt=json\"
savename = \"hyakunin.json\"

# download only when the local cache file is missing
if not os.path.exists(savename):
    req.urlretrieve(url, savename)

data = json.load(open(savename, \"r\", encoding=\"utf-8\"))

r = random.choice(data)
print(r['kami'], r['simo'])","sub_path":"sample/scraping_and_machine_lerning/ch3/json-hyakunin.py","file_name":"json-hyakunin.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"349629380","text":"from collections import namedtuple


def greedy(items, capacity):

    # a trivial greedy algorithm for filling the knapsack
    # it takes items in-order until the knapsack is full

    value = 0
    weight = 0
    taken = [0] * len(items)

    for index, item in enumerate(items):
        if weight + item.weight <= capacity:
            taken[index] = 1
            value += item.value
            weight += item.weight

    return taken


def validate_results(results_vector, items, capacity):

    if len(results_vector) != len(items):
        raise 
ValueError('Results dimension mismatch')\n\n print('Solution value: ' + str(get_stored_value(results_vector, items)))\n\n acc = 0\n\n for idx, val in enumerate(results_vector):\n if val == 1:\n acc = acc + items[idx][2]\n\n if acc <= capacity:\n print('Results are feasible, weight: ' + str(acc))\n return True\n else:\n print('Results are infeasible!! weight: ' + str(acc))\n return False\n\n\ndef score_results(results_vector, items):\n\n acc = 0\n\n for idx, val in enumerate(results_vector):\n if val == 1:\n acc = acc + items[idx][1]\n\n print('Score: ' + str(acc))\n return acc\n\n\ndef map_item_density(items):\n\n items_inc_density = []\n seen = []\n items_density = namedtuple(\"Item\", ['index', 'value', 'weight', 'density'])\n for i in items:\n density = i[1] / i[2]\n items_inc_density.append(items_density(i[0], int(i[1]), int(i[2]), density))\n\n if density not in seen:\n seen.append(density)\n else:\n print('Duplicate density: ' + str(density))\n\n return items_inc_density\n\n\ndef branch_and_bound(items, capacity):\n\n feasible_solutions = []\n\n # Guess the solution\n guess_vector = greedy(items, capacity)\n best_estimate = get_stored_value(guess_vector, items)\n print('Initial guess:' + str(best_estimate))\n validate_results(guess_vector, items, capacity)\n feasible_solutions.append({'path_vector': guess_vector, 'value': best_estimate})\n\n # Attempt to find a better solution\n feasible_solutions, best_estimate = traverse_branch(items, best_estimate, [], feasible_solutions, capacity, capacity,\n branch_direction='left')\n\n feasible_solutions, best_estimate = traverse_branch(items, best_estimate, [], feasible_solutions, capacity, capacity,\n branch_direction='right')\n\n # Select the best result\n best_path_vector = [element for element in feasible_solutions if element['value'] == best_estimate][0]['path_vector']\n value = get_stored_value(best_path_vector, items)\n\n print('Best solution: ' + str(value))\n\n return best_path_vector\n\n\ndef traverse_branch(items, best_estimate, path_vector, solutions, remaining_capacity, total_capacity, branch_direction):\n\n # Define the current choice\n if len(path_vector) == len(items):\n return solutions, best_estimate\n elif branch_direction == 'left':\n new_path_vector = path_vector + [1]\n else:\n new_path_vector = path_vector + [0]\n\n # Check feasibility\n if get_current_weight(items, new_path_vector) > remaining_capacity:\n return solutions, best_estimate\n\n # Compute the bound\n max_branch_value = estimate_max_branch_value(items, new_path_vector, total_capacity)\n\n # Check bound:\n if best_estimate >= max_branch_value:\n return solutions, best_estimate\n\n # If this a leaf, return the current solution\n if len(new_path_vector) == len(items):\n total_value = get_stored_value(new_path_vector, items)\n solutions.append({'path_vector': new_path_vector, 'value': total_value})\n print('New best guess:' + str(total_value))\n return solutions, total_value\n\n else:\n # Otherwise continue branching\n solutions, best_estimate = traverse_branch(items, best_estimate, new_path_vector, solutions, remaining_capacity, total_capacity,\n branch_direction='left')\n\n solutions, best_estimate = traverse_branch(items, best_estimate, new_path_vector, solutions, remaining_capacity, total_capacity,\n branch_direction='right')\n\n return solutions, best_estimate\n\n\ndef estimate_max_branch_value(items, results_vector, total_capacity):\n\n \"\"\"\n iterates from depth + 1 to n, adding to the value and subtracting from the capacity\n until there is no more 
capacity and fill in the remaining fractional part.
    \"\"\"

    current_value = get_stored_value(results_vector, items)

    # Have we reached the end of the branch already?
    if len(results_vector) == len(items):
        return current_value
    else:

        path_vector_temp = results_vector

        # Simulate the maximum value path vector
        count_of_items = len(path_vector_temp)
        cumulative_weight = get_current_weight(items, path_vector_temp)

        for index, item in enumerate(items):
            if index > count_of_items-1:
                if cumulative_weight + item.weight <= total_capacity:
                    current_value += item.value
                    cumulative_weight += item.weight
                elif cumulative_weight < total_capacity:
                    remaining_capacity = total_capacity - cumulative_weight
                    fraction = remaining_capacity / item.weight
                    current_value += (item.value * fraction)
                    cumulative_weight += (item.weight*fraction)

        if cumulative_weight > total_capacity:
            raise ValueError('Allowed weight exceeded!')

        return current_value


def get_current_weight(items, results_vector):

    acc = 0

    for idx, val in enumerate(results_vector):
        if val == 1:
            acc = acc + items[idx][2]

    return acc


def get_stored_value(results_vector, items):

    acc = 0

    for idx, val in enumerate(results_vector):
        if val == 1:
            acc = acc + items[idx][1]

    return acc


def get_results_vector(result_str):

    result_spl = result_str.split('\n')[1]

    return [int(s) for s in result_spl.split(' ')]

","sub_path":"starburst/routines.py","file_name":"routines.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"535947119","text":"# title: cells-with-odd-values-in-a-matrix
# detail: https://leetcode.com/submissions/detail/394270990/
# datetime: Sat Sep 12 01:19:20 2020
# runtime: 44 ms
# memory: 14 MB

from typing import List  # needed so the List[...] annotations resolve outside LeetCode


class Solution:
    def oddCells(self, n: int, m: int, indices: List[List[int]]) -> int:
        rows = [0] * n
        cols = [0] * m
        for i, j in indices:
            rows[i] += 1
            cols[j] += 1
        return sum((rows[i] + cols[j]) % 2 for i in range(n) for j in range(m))
    ","sub_path":"leetcode/cells-with-odd-values-in-a-matrix/394270990.py","file_name":"394270990.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"14607996","text":"'''
API Post [Python]
---------------------------
Author: Inove Coding School
Version: 1.0
 
Description:
Uses the requests library to send an HTTP POST to the Flask server
'''

__author__ = \"Inove Coding School\"
__email__ = \"INFO@INOVE.COM.AR\"
__version__ = \"1.0\"

import requests

endpoint = 'registro'

url = f'http://127.0.0.1:5000/{endpoint}'

if __name__ == \"__main__\":
    try:
        name = str(input('Enter the name of the person:'))
        age = int(input('Enter the age:'))
        post_data = {\"name\": name, \"age\": age} 
        x = requests.post(url, data = post_data)
        print('POST sent to:', url)
        print('Data:')
        print(post_data)
    except:
        print('Error, POST not sent')
","sub_path":"ejercicios_practica/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"215771916","text":"import numpy as np

from constants import RE


def cnv_azel2latlon(az, el, site, ht=450, Re=RE):
    '''
    Function to convert from an azimuth/elevation grid to a
    
latitude/longitude grid given an unwarping height and a site location.\n For elevations below the horizon, returns NaN.\n INPUTS:\n az - M x N array of azimuths to be converted [degrees]\n el - M x N array of elevation to be converted [degrees]\n site - 1 x 2 array containing [latitude, longitude] of the site\n [degrees]\n ht - scalar height to be used in the conversion [km] (default is\n 450 [km])\n Re - radius of Earth in km\n OUTPUTS:\n lat - M x N array of latitudes [degrees]\n lon - M x N array of longitudes [degrees]\n\n HISTORY:\n 17-Oct-2006: Converted from IDL by Jonathan J. Makela\n (jmakela@uiuc.edu)\n 03-Oct-2013: Converted from MATLAB to Python by Brian Harding\n (bhardin2@illinois.edu)\n '''\n # Convert inputs from degrees to radians\n el_r = np.radians(el)\n az_r = np.radians(az)\n lat_r = np.radians(site[0])\n lon_r = np.radians(site[1])\n\n # Calculate the differential angle, alpha\n temp = np.cos(el_r)/(1.+(ht/Re))\n alpha = np.arccos(temp) - el_r\n\n # Calculate the pierce point latitude\n temp = np.sin(lat_r) * np.cos(alpha) + np.cos(lat_r)*np.cos(az_r)*np.sin(alpha)\n lat_r = np.arcsin(temp)\n\n # Calculate the pierce point longitude\n temp = np.sin(alpha) * np.sin(az_r) / np.cos(lat_r)\n lon_r = np.arcsin(temp) + lon_r\n\n # Convert radian measurements to degrees\n lat = np.degrees(lat_r)\n lon = np.degrees(lon_r)\n\n return lat, lon\n","sub_path":"gps/ipp.py","file_name":"ipp.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"603858078","text":"import os\nimport tensorflow as tf\nimport numpy as np\nfrom utils.caption_generator import Caption_Generator\nfrom utils.test import test\nimport argparse\n\nparser = argparse.ArgumentParser(description='Image Captioning software')\n\nparser.add_argument('image_path', metavar = 'image to caption path', type=str, help='path of image to caption')\nargs = parser.parse_args()\nimage_path = args.image_path\n\nmodel_path = './models/tensorflow'\nvgg_path = './data/vgg16-20160129.tfmodel'\n\ndim_embed = 256\ndim_hidden = 256\ndim_in = 4096\nbatch_size = 1\nlearning_rate = 0.001\nmomentum = 0.9\nn_epochs = 25\n\nif not os.path.exists('data/ixtoword.npy'):\n print ('You must run 1. 
O\\'reilly Training.ipynb first.')\nelse:\n print(tf)\n print(\"Checking the reset_default_graph()\")\n tf.reset_default_graph()\n with open(vgg_path,'rb') as f:\n fileContent = f.read()\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(fileContent)\n\n images = tf.placeholder(\"float32\", [1, 224, 224, 3])\n tf.import_graph_def(graph_def, input_map={\"images\":images})\n\n ixtoword = np.load('data/ixtoword.npy').tolist()\n n_words = len(ixtoword)\n maxlen=15\n graph = tf.get_default_graph()\n sess = tf.InteractiveSession(graph=graph)\n caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, batch_size, maxlen+2, n_words)\n graph = tf.get_default_graph()\n image, generated_words = caption_generator.build_generator(maxlen=maxlen)\n print(\"Caption from Image\")\n print(test(sess, image, generated_words, ixtoword, image_path, graph, images))\n \n #for image_path in image_paths:\n #test(sess,image,generated_words,ixtoword, image_path, graph, images)\n","sub_path":"image_captioning/experimental/second_attempt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"70865350","text":"import os\nimport re\nimport json\nimport logging\nimport itertools\nimport requests\nfrom flask import Flask, render_template, jsonify, Blueprint\nfrom flask_cors import CORS\nfrom urllib3.exceptions import NewConnectionError, MaxRetryError\n\n\nETCD_ENDPOINT = os.getenv(\"ETCD_ENDPOINT\", \"http://localhost:2379\")\nETCD_USERNAME = os.getenv(\"ETCD_USERNAME\", \"\")\nETCD_PASSWORD = os.getenv(\"ETCD_PASSWORD\", \"\")\nSIDECAR_PREFIX = os.getenv(\"SIDECAR_PREFIX\", \"sidecar\")\nNODE_LABEL_COLOR = os.getenv(\"NODE_LABEL_COLOR\", \"#d8d9da\")\nNODE_API = f\"{ETCD_ENDPOINT}/v2/keys/netswatch/network/nodes\"\nSUBNET_API = f\"{ETCD_ENDPOINT}/v2/keys/netswatch/network/subnets\"\n# RAW_NODES = []\nROUTER_EDGE_WIDTH = 1\nROUTER_BACKGROUND = \"#1982C4\"\nROUTER_BORDER = \"#8AC926\"\nROUTER_SIZE = 20\n\nLOOP = int(os.getenv(\"LOOP\", \"60\")) # Main thread loop seconds\n\nclass PrefixMiddleware(object):\n\n def __init__(self, app, prefix=''):\n self.app = app\n self.prefix = prefix\n\n def __call__(self, environ, start_response):\n if environ['PATH_INFO'].startswith(self.prefix):\n environ['PATH_INFO'] = environ['PATH_INFO'][len(self.prefix):]\n environ['SCRIPT_NAME'] = self.prefix\n return self.app(environ, start_response)\n else:\n start_response('404', [('Content-Type', 'text/plain')])\n return [\"[PrefixMiddleware] Wrong prefix for this app\".encode()]\n\napp = Flask(__name__)\napp.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix=f\"/{SIDECAR_PREFIX}\")\n#bp = Blueprint(\"bprefix\", __name__, template_folder=\"templates\", static_folder=f\"/{SIDECAR_PREFIX}/static\")\n\n\ndef logger(name='sidecar'):\n log = logging.getLogger(name)\n log_format = '[%(asctime)s] %(levelname)s [%(threadName)s]: %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n logging.basicConfig(format=log_format, datefmt=date_format)\n log.setLevel(logging.DEBUG)\n return log\n\n\nLOG = logger()\n\n\ndef extract_ip(text=\"\") -> str:\n regex = r\"(((25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?))\"\n m = re.search(regex, text)\n return m.group(0)\n\n\ndef dedup(it):\n \"\"\"\n :param it: iterable [(1,3), (1,4), (3,1),(3,4),(4,3)]\n :return: [(1,3),(1,4),(3,4)]\n \"\"\"\n sep = \",\"\n foo = []\n for item in it:\n li = list(item)\n li.sort()\n s = sep.join(str(x) for x in li)\n 
foo.append(s)\n\n bar = []\n for item in set(foo):\n bar.append([int(x) for x in item.split(sep)])\n return bar\n\n\ndef get_routers(raw_nodes) -> list:\n return [node for node in raw_nodes if node[\"node_type\"] == \"router\"]\n\n\ndef parse_etcd_payload(payload) -> list:\n \"\"\"\n parse etcd API response, and return list like this:\n [\n {'key': '/netswatch/network/subnets/10.14.128.0-20', 'value': '{xxxxxxxxxxxxx}', 'modifiedIndex': 149, 'createdIndex': 149},\n {'key': '/netswatch/network/subnets/10.13.112.0-20', 'value': '{xxxxxxxxxxxxx}', 'modifiedIndex': 102, 'createdIndex': 102}\n ]\n \"\"\"\n data = json.loads(payload)\n return data[\"node\"].get(\"nodes\", [])\n\n\ndef extract_node_info(subnet):\n ip = extract_ip(subnet[\"key\"])\n value = json.loads(subnet[\"value\"])\n org = value[\"Meta\"][\"OrgName\"]\n node_type = value[\"Meta\"][\"NodeType\"]\n meta = {'hostname': value[\"Meta\"][\"NodeName\"], \"host_ip\": value[\"Meta\"][\"HostIP\"]}\n return org, ip, node_type, meta\n\n\ndef load_nodes():\n raw_nodes = []\n try:\n response = requests.get(SUBNET_API, auth=(ETCD_USERNAME, ETCD_PASSWORD), params={\"recursive\": \"true\"})\n if response.status_code == 200:\n idx = 1\n orgs = {}\n\n for node in parse_etcd_payload(response.text):\n org, ip, node_type, meta = extract_node_info(node)\n if org not in orgs:\n orgs[org] = len(orgs) + 1\n grp = orgs[org]\n raw_nodes.append({\n \"id\": idx,\n \"ip\": ip,\n \"org\": org,\n \"group\": grp,\n \"node_type\": node_type,\n \"meta\": meta,\n })\n idx += 1\n else:\n print(\"Error loading nodes\")\n print(response.text)\n except Exception as err:\n print(err)\n\n return raw_nodes\n\n\n\n\n\ndef generate_nodes(raw_nodes):\n nodes = []\n for raw in raw_nodes:\n host_ip = raw[\"meta\"][\"host_ip\"]\n hostname = raw[\"meta\"][\"hostname\"]\n node = {\n \"id\": raw[\"id\"],\n \"label\": f\"{hostname}\",\n \"group\": raw[\"group\"],\n \"title\": f\"

                Hostname: {hostname}<br>Host IP: {host_ip}<br>Net: {raw['ip']}<br>

\",\n # Add additional info for dashboard\n \"hostip\": host_ip,\n \"location\": raw[\"org\"],\n \"hostname\": hostname,\n \"net\": raw[\"ip\"],\n \"nodetype\": raw[\"node_type\"],\n \"font\": {\n \"color\": NODE_LABEL_COLOR,\n }\n }\n\n if raw[\"node_type\"] == \"router\":\n node[\"size\"] = ROUTER_SIZE\n node[\"color\"] = {\n \"background\": ROUTER_BACKGROUND,\n \"border\": ROUTER_BORDER,\n }\n node[\"label\"] = f\"{raw['org']}({raw['ip']})\"\n node[\"shape\"] = \"square\"\n elif raw[\"node_type\"] == \"internal\":\n node[\"shapeProperties\"] = {\n \"borderDashes\": [5, 5]\n }\n # else:\n # node[\"shape\"] = \"square\"\n\n nodes.append(node)\n return nodes\n\n\ndef generate_edges(raw_nodes):\n edges = []\n routers = get_routers(raw_nodes)\n for rt in routers:\n org = rt[\"org\"]\n router_id = rt[\"id\"]\n for node in raw_nodes:\n if node[\"node_type\"] == \"router\":\n continue\n if node[\"org\"] == org:\n edges.append({\n \"from\": node[\"id\"],\n \"to\": router_id\n })\n # Connect routers\n tmp = dedup(itertools.permutations([r[\"id\"] for r in routers], 2))\n for t in tmp:\n edges.append({\n \"from\": t[0],\n \"to\": t[1],\n \"value\": ROUTER_EDGE_WIDTH,\n \"scaling\": {\n \"min\": 1,\n \"max\": 6,\n }\n })\n return edges\n\n\ndef generate_info(raw_nodes):\n total_nodes = len(raw_nodes)\n router = 0\n node = 0\n internal = 0\n for raw in raw_nodes:\n if raw[\"node_type\"] == \"router\":\n router += 1\n elif raw[\"node_type\"] == \"node\":\n node += 1\n else:\n internal += 1\n\n return {\n \"total\": total_nodes,\n \"router\": router,\n \"node\": node,\n \"internal\": internal\n }\n\n\ndef sync_nodes_subnets():\n LOG.info(\"Synchronize nodes and subnets\")\n all_nodes = {}\n subnets_set = set()\n try:\n response = requests.get(SUBNET_API, auth=(ETCD_USERNAME, ETCD_PASSWORD), params={\"recursive\": \"true\"})\n except Exception as err:\n print(err)\n if response.status_code == 200:\n data = json.loads(response.text)\n try:\n keys = data[\"node\"][\"nodes\"]\n # key[\"keys\"] is like: /coreos.com/network/subnets/10.12.128.0-20\n subnets_set = set([extract_ip(key[\"key\"]) for key in keys])\n\n except KeyError as err:\n LOG.error(\"Key error while processing subnets\")\n LOG.debug(err)\n LOG.debug(data)\n return\n else:\n LOG.error(\"Error while loading subnets\")\n LOG.debug(response.text)\n\n response = requests.get(NODE_API, auth=(ETCD_USERNAME, ETCD_PASSWORD), params={\"recursive\": \"true\"})\n if response.status_code == 200:\n data = json.loads(response.text)\n orgs = data[\"node\"].get(\"nodes\", [])\n if not orgs:\n # If NODE_API contains no \"nodes\", return directly\n return\n for org in orgs:\n try:\n keys = org[\"nodes\"]\n for key in keys:\n net = extract_ip(key[\"key\"])\n all_nodes[net] = key[\"key\"]\n except KeyError:\n LOG.error(f\"Key error while processing {org}\")\n else:\n LOG.error(\"Error while loading nodes\")\n LOG.debug(response.text)\n return\n\n nodes_set = set(all_nodes.keys())\n orphan_nodes = nodes_set - subnets_set\n for orphan in orphan_nodes:\n node = all_nodes[orphan]\n LOG.info(f\"Found orphan node: {node}\")\n url = f\"{ETCD_ENDPOINT}/v2/keys/{node}\"\n\n response = requests.delete(url, auth=(ETCD_USERNAME, ETCD_PASSWORD))\n\n if response.status_code == 200:\n LOG.info(f\"Orphan node {node} deleted\")\n else:\n LOG.critical(f\"Error while deleting orphan node: {node}\")\n LOG.critical(response)\n\n\n# @bp.route('/')\n@app.route('/')\ndef index():\n return render_template(\"index.html\", prefix=SIDECAR_PREFIX)\n\ncors = CORS(app, 
resources={r\"/grafana-iframe\": {\"origins\": \"*\"}})\n@app.route('/grafana-iframe')\ndef grafana_iframe():\n return render_template(\"grafana.html\", prefix=SIDECAR_PREFIX)\n\n\n@app.route('/topo')\ndef generate_topology():\n raw = load_nodes()\n generate_info(raw)\n topo = {\n \"nodes\": generate_nodes(raw),\n \"edges\": generate_edges(raw),\n \"info\": generate_info(raw),\n }\n return topo\n\n# app.register_blueprint(bp, url_prefix=f\"/{SIDECAR_PREFIX}\")\n\nif __name__ == \"__main__\":\n # flask_thread = threading.Thread(target=app.run, kwargs={\"host\": \"0.0.0.0\"})\n # flask_thread.start()\n # while True:\n # sync_nodes_subnets()\n # time.sleep(LOOP)\n app.run(host=\"0.0.0.0\", debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"48547079","text":"import pandas as pd\nimport matplotlib.pyplot as plt\ndf = pd.read_excel('TB.xls')\ndf1=df[['订单付款时间','买家实际支付金额']]\ndf1 = df1.set_index('订单付款时间') #将“订单付款时间”设置为索引\nplt.rcParams['font.sans-serif']=['SimHei'] #解决中文乱码\n#按年统计数据\ndf_y=df1.resample('AS').sum().to_period('A')\nprint(df_y)\n#按季度统计数据\ndf_q=df1.resample('Q').sum().to_period('Q')\nprint(df_q)\n#绘制子图\nfig = plt.figure(figsize=(8,3))\nax=fig.subplots(1,2)\ndf_y.plot(subplots=True,ax=ax[0])\ndf_q.plot(subplots=True,ax=ax[1])\n#调整图表距上部和底部的空白\nplt.subplots_adjust(top=0.95,bottom=0.2)\nplt.show()","sub_path":"Python数据分析从入门到精通/MR/Code/09/example/06/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"312585279","text":"import requests\nimport re\nimport string\n\n\ndef open_book(book_id): #opens book given an id\n with open(f'{book_id}.txt', encoding='utf-8') as f:\n contents = f.read()\n print(contents)\n return contents\n\ndef characters(contents):\n char_count = 0\n for char in contents:\n if char in string.printable:\n char_count += 1\n return char_count\n\ndef words(contents):\n word_count = contents.split()\n return len(word_count)\n\ndef sentences(contents):\n sentence_count = 0\n for char in contents:\n if char == '.' or char == '?' 
or char == '!':
            sentence_count += 1
    return sentence_count

def get_ari(contents):
    ari_scale = {
        1: {'ages': '5-6', 'grade_level': 'Kindergarten'},
        2: {'ages': '6-7', 'grade_level': '1st Grade'},
        3: {'ages': '7-8', 'grade_level': '2nd Grade'},
        4: {'ages': '8-9', 'grade_level': '3rd Grade'},
        5: {'ages': '9-10', 'grade_level': '4th Grade'},
        6: {'ages': '10-11', 'grade_level': '5th Grade'},
        7: {'ages': '11-12', 'grade_level': '6th Grade'},
        8: {'ages': '12-13', 'grade_level': '7th Grade'},
        9: {'ages': '13-14', 'grade_level': '8th Grade'},
        10: {'ages': '14-15', 'grade_level': '9th Grade'},
        11: {'ages': '15-16', 'grade_level': '10th Grade'},
        12: {'ages': '16-17', 'grade_level': '11th Grade'},
        13: {'ages': '17-18', 'grade_level': '12th Grade'},
        14: {'ages': '18-22', 'grade_level': 'College'}
    }
    ari = round(4.71 * (characters(contents)/words(contents)) + 0.5 * (words(contents)/sentences(contents)) - 21.43)
    grade = ari_scale[ari]['grade_level']
    age = ari_scale[ari]['ages']
    print(f'The ARI is {ari}\nThis corresponds to a(n) {grade} level of difficulty that is suitable for an average person {age} years old.')
    return ari



def get_books(search):
    # put the search term into our query string / query parameters
    params = {'query': search}
    # send the request to project gutenberg
    response = requests.get('https://www.gutenberg.org/ebooks/search/', params=params)
    # running regex on the html source code that we get in response
    # to get a list of ids and titles of books that match the search term
    text = response.text
    book_ids = re.findall(r'\/ebooks\/(\d+)', text)
    titles = re.findall(r'(.+)<\/span>', text)
    titles = titles[4:]  # skip the first matches, which are not book titles
    output = []
    # pair ids with titles, stopping at the shorter list so a mismatch cannot crash us
    for i in range(min(len(book_ids), len(titles))):
        output.append({'title': titles[i], 'id': book_ids[i]})
    return output
    
def get_book_text(book_id):
    url = f'https://www.gutenberg.org/files/{book_id}/{book_id}-0.txt'
    response = requests.get(url)
    return response.text

def save_book(book_id, text):
    start = text.find('***')
    text = text[start:]
    f = open(f'{book_id}.txt', 'x')
    f.write(text)
    f.close()

def find_new_book():
    search = input('What book would you like to search for? ') # prompt the user for a search term
    books = get_books(search) # list of dictionaries
    # [{'title': 'Pygmalion', 'id': '3825'}, {'title': 'Les Fleurs du Mal. English', 'id': '36098'}
    for i in range(len(books)):
        print(i, books[i]['title'])
    book_id = input('what book would you like to save? 
enter id: ')\n text = get_book_text(book_id)\n save_book(book_id, text)\n return book_id\n\ndef main(): \n running = True\n while running:\n choice = input(\"would you like to find & save a new book (f) or get ari for existing book (e)?\")\n if choice == 'f':\n find_new_book()\n else: \n book_id = input ('enter your book id number: ')\n contents = open_book(book_id)\n get_ari(contents)\n\n\nmain()","sub_path":"Code/Zach/PDX_6_14/Python/sarah_request_ex.py","file_name":"sarah_request_ex.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"210212244","text":"import pandas as pd\nimport plotly.express as px\nimport os\nimport sys\nfrom datetime import datetime\nimport time\nimport gzip\nimport plotly\nimport shutil\nimport plotly.graph_objects as go\n\n# class GroupBucket(list):\n\n# static_dir contains jpgs html_dir contains html files\nstatic_dir = ''\nhtml_dir = ''\ntotals_csv = ''\nmodule_root = ''\n\ndef setGlobalDataPath(home,codename,chart_type,m_root):\n global static_dir, html_dir, totals_csv, module_root\n html_dir = home+'/data_dir/'+codename+'/'+codename+'_htmls/'+chart_type\n static_dir = home+'/data_dir/'+codename+'/'+codename+'_jps/'+chart_type\n totals_csv = home+ '/data_dir/'+codename+'/total_'+codename+'.csv'\n module_root = m_root\n\n\ndef get_py(py):\n \"\"\"helper function that returns name of df column name for particular percentile (py) of latency\"\"\"\n p_col_name = {\n 50: \"P50_latency(ms)\",\n 90: 'P90_latency(ms)',\n 95: 'P95_latency(ms)',\n 99: 'P99_latency(ms)',\n \"QPS\": \"QPS\"\n }\n return p_col_name[py]\n\n\ndef get_trunk(py):\n \"\"\"helper function that returns name of df column name for particular percentile (py) of latency\"\"\"\n p_col_name = {\n \"P50_latency(ms)\": \"P50ms\",\n 'P90_latency(ms)': \"P90ms\",\n 'P95_latency(ms)': \"P95ms\",\n 'P99_latency(ms)': \"P99ms\"\n }\n return p_col_name[py]\n\n\ndef get_title(p_col_name):\n \"\"\" returns paper-ready y axis name \"\"\"\n\n y_axis_title = {\n \"P50_latency(ms)\": \"50th Percentile Query Latency (ms)\",\n \"P90_latency(ms)\": \"90th Percentile Query Latency (ms)\",\n \"P95_latency(ms)\": \"95th Percentile Query Latency (ms)\",\n \"P99_latency(ms)\": \"99th Percentile Query Latency (ms)\",\n \"QPS\": \"QPS\"\n }\n return y_axis_title[p_col_name]\n\n\ndef getXtitle(input):\n \"\"\" returns paper-ready y axis name \"\"\"\n\n x_axis_title = {\n \"parallel_requests\": \"Concurrent Connections\",\n \"QPS\": \"QPS\"\n }\n return x_axis_title[input]\n\n\nclass LineData():\n color_matrix = {\n \"solr\": plotly.colors.sequential.Reds[6],\n \"elastic\": plotly.colors.sequential.Greens[6],\n \"ideal\": plotly.colors.sequential.Greys[6]\n }\n symbol_matrix = {\n\n 0: {11: \"circle\", 12: \"circle-open\", 21: \"square\", 22: \"square-open\", \"2x\": \"asterisk\", \"1x\": \"asterisk\"},\n 2: {11: \"circle\", 12: \"circle-open\", 21: \"square\", 22: \"square-open\"},\n 4: {11: \"circle\", 12: \"circle-open\", 21: \"square\", 22: \"square-open\"},\n 8: {11: \"circle\", 12: \"circle-open\", 21: \"square\", 22: \"square-open\"}\n }\n\n def __repr__(self):\n return str(self.__dict__)\n\n # input_x == cluster sizes list\n # input_y == average QPS for each cluster size for a given GROUP\n # GROUPNAME == the config for that exp. ee.g. 
rf_multiple == 4 and Shard == 2 ; GROUP =(4 2)\n # north error == the max QPS for theat cluster for given GROUP\n # south_error == the min QPS for that GROUP and Cluster\n\n def __init__(self, gn, csize=0, load=0, engine=0):\n\n self.csize = csize\n self.input_x = []\n self.input_y = []\n self.GROUPNAME = gn\n self.north_error = []\n self.south_error = []\n self.load = load\n self.name = ''\n self.engine = engine\n\n def getGn(self):\n return self.GROUPNAME\n\n # just gonna save some time and append since this is done in sorted order anyway, so values match across lists\n def setInputX(self, x):\n self.input_x.append(x)\n\n def setInputY(self, y):\n self.input_y.append(y)\n\n def setInputNe(self, ne):\n self.north_error.append(ne)\n\n def setInputSe(self, se):\n self.south_error.append(se)\n\n def setName(self, gn, engine=''):\n the_load = ''\n the_clustersize = ''\n if self.load > 0:\n the_load = \" load=\" + str(self.load) + ' '\n if self.csize > 0:\n the_clustersize = ' clustersize=' + str(self.csize)\n self.name = str(gn)\n\n def setLine(self):\n # clustersize = df.filter(like)\n south = [i - j for i, j in zip(self.input_y, self.south_error)]\n north = [i - j for i, j in zip(self.north_error, self.input_y)]\n max_y = [sum(x) for x in zip(north, self.input_y)]\n # if self.GROUPNAME == \"1x\" or self.GROUPNAME == \"2x\":\n # line_color=dict(color=plotly.colors.sequential.Purples[8])\n # marker_symbol=dict(symbol=0)\n # else:\n line_color = dict(color=LineData.color_matrix[self.engine])\n marker_symbol = dict(symbol=LineData.symbol_matrix[self.csize][self.GROUPNAME], size=11)\n\n data = go.Scatter(\n x=self.input_x,\n y=max_y,\n name=self.name,\n line=line_color,\n mode='lines+markers',\n marker=marker_symbol\n )\n return data\n\n def setLineErrorBars(self):\n # clustersize = df.filter(like)\n south = [i - j for i, j in zip(self.input_y, self.south_error)]\n north = [i - j for i, j in zip(self.north_error, self.input_y)]\n line_color = dict(color=LineData.color_matrix[self.engine])\n marker_symbol = dict(symbol=LineData.symbol_matrix[self.csize][self.GROUPNAME], size=11)\n data = go.Scatter(\n x=self.input_x,\n y=self.input_y,\n error_y=dict(\n type='data',\n symmetric=False,\n array=north,\n arrayminus=south),\n name=self.name)\n\n return data\n\n def setTailLine(self):\n # clustersize = df.filter(like)\n # color_index\n # YlGn\n # Reds\n # Blues\n line_color = dict(color=LineData.color_matrix[self.engine])\n # not using groupname anymore, so we'll have to do something about this later to map to diff lines for now default to circle\n marker= LineData.symbol_matrix[self.csize].get(self.GROUPNAME, 'circle')\n marker_symbol = dict(symbol=marker, size=11)\n data = go.Scatter(\n x=self.input_x,\n y=self.input_y,\n name=self.name,\n line=line_color,\n mode='lines+markers',\n marker=marker_symbol,\n )\n return data\n\n\n# REMEMBER lineList is a List full of line objects with with just the length of the x axis (clustersize.size) and\n# groupname attached to each object\ndef fillClustersizeLine(lineList, df, col, axis_x):\n \"\"\" this function will fill out each CLUSTERSIZE && GROUPNAME linelist object with a list for each x = axis_x\n and y = col \"\"\"\n\n # pandas complains about the parens\n p_trunk = get_trunk(col)\n df.rename(columns={col: p_trunk}, inplace=True)\n for ld in lineList:\n gn = ld.getGn()\n ld.setName(gn)\n df_engine = df.loc[df['engine'] == ld.engine]\n gn_line_df = df_engine.loc[df_engine['GROUP'] == gn]\n gn_csize_df = gn_line_df.loc[gn_line_df[\"clustersize\"] == 
ld.csize]\n # gn_csize_df is a df for a single line... just need to populte ld now by iterating over the\n # parallel_requests (x) and setting latency as y\n if axis_x == \"QPS\":\n x_set = gn_csize_df.QPS.unique()\n else:\n x_set = gn_csize_df.parallel_requests.unique()\n\n x_set.sort()\n for i in x_set:\n ld.setInputX(i)\n yinput = gn_csize_df.query(axis_x + \" == %s\" % str(i))\n ld.setInputY(yinput.iloc[0][p_trunk])\n\n\ndef fillClustersizeLineQPS(lineList, df, col):\n for ld in lineList:\n gn = ld.getGn()\n ld.setName(gn)\n df_engine = df.loc[df['engine'] == ld.engine]\n gn_line_df = df_engine.loc[df_engine['GROUP'] == gn]\n gn_csize_df = gn_line_df.loc[df[\"clustersize\"] == ld.csize]\n _df = gn_csize_df.loc[df[\"engine\"] == ld.engine]\n # gn_csize_df is a df for a single line... just need to populte ld now by iterating over the\n # parallel_requests (x_set) and setting latency as y\n x_set = _df.parallel_requests.unique()\n x_set.sort()\n for i in x_set:\n ld.setInputX(i)\n yinput = gn_csize_df.query(\"parallel_requests == %s\" % str(i))\n ld.setInputY(yinput.iloc[0]['QPS'])\n\n\ndef fillLineList(lineList, df, c):\n # this loop fills the data structure the plotting library needs to project the results\n for ld in lineList:\n gn = ld.getGn()\n df.sort_values(\"clustersize\")\n _df = df.loc[df['engine'] == ld.engine]\n _df_ = _df.loc[_df['GROUP'] == gn]\n\n # for each point on the line's x axis\n for x_point in _df_.clustersize.unique():\n cluster_spec_data_for_gn = _df_.loc[_df_['clustersize'] == x_point]\n ld.setName(gn, engine=ld.engine)\n ld.setInputX(x_point)\n # these two lines remove the warm cache line\n cluster_spec_data_for_gn.sort_values(\"QPS\", inplace=True)\n # cluster_spec_data_for_gn = cluster_spec_data_for_gn.drop(cluster_spec_data_for_gn.index[0])\n # these set the value for a single error bar on the line\n ld.setInputY(cluster_spec_data_for_gn[c].mean())\n ld.setInputNe(cluster_spec_data_for_gn[c].max())\n ld.setInputSe(cluster_spec_data_for_gn[c].min())\n # print(str(ld))\n\n\ndef cdfFillLineList(lineList, df):\n # this loop fills the data structure the plotting library needs to project the results\n seconds_to_millis_factor = 1000\n\n for ld in lineList:\n gn = ld.getGn()\n ld.setName(gn)\n _df = df.loc[df['engine'] == ld.engine]\n gn_line_df = _df.loc[_df['GROUP'] == gn]\n l_row = gn_line_df.loc[gn_line_df['clustersize'] == ld.csize]\n line_row = l_row.loc[l_row['parallel_requests'] == ld.load]\n fct_values = line_row.iloc[0][\"fcts\"].split(\"--\")\n # print(fct_values)\n percentile_count = 0\n for i in fct_values:\n ld.setInputX(float(i) * seconds_to_millis_factor)\n ld.setInputY(percentile_count)\n percentile_count = percentile_count + 5\n\n\ndef Py_max_throughput(query, codename, py, axis_x):\n \"\"\"Py_max_throughput(query, codename, py, axis_x) -> produces a html plot of y=P(py), x=axis_x for the\n experiments \"\"\"\n\n lat_fig_title = \"\"\n lat_fig_title = \"SolrCloud Tail Latency (Round Robin)\" if query == \"direct\" else \"SolrCloud Tail Latency (SolrJ)\"\n p_col_name = get_py(py)\n total_scale_file = totals_csv\n df = pd.read_csv(total_scale_file)\n df = df.sort_values(\"clustersize\")\n # \n engineList = df.engine.unique()\n clustersizeList = df.clustersize.unique()\n group_array = df.GROUP.unique()\n # this creates a list of line objects for each groupname/clusersize combo\n latLineList = []\n for e in engineList:\n for csize in clustersizeList:\n for gn in group_array:\n latLineList.append(LineData(gn, csize, engine=e))\n\n global static_dir, 
html_dir\n if py == \"QPS\":\n fillClustersizeLineQPS(latLineList, df, p_col_name)\n # name dirs\n static_dir = static_dir+\"/y-QPS_x-load/\"\n html_dir = html_dir +\"/y-QPS_x-load/\"\n\n else:\n fillClustersizeLine(latLineList, df, p_col_name, axis_x)\n static_dir = static_dir + \"/y-percentileTails_x-throughput/\"\n html_dir = html_dir + \"/y-percentileTails_x-throughput/\"\n\n y_title = get_title(p_col_name)\n x_title = getXtitle(axis_x)\n\n # this creates the line data the lib needs and packs into a list\n lat_data_list = [x.setTailLine() for x in latLineList]\n fig_lat = go.Figure(lat_data_list)\n x_font = dict(\n size=20,\n color=\"#7f7f7f\"\n )\n\n fig_lat.update_layout(\n paper_bgcolor='rgba(255,255,255,255)',\n plot_bgcolor='rgba(255,255,255,255)',\n # title=lat_fig_title,\n xaxis_title=x_title,\n showlegend=True,\n xaxis=dict(\n showgrid=True,\n gridcolor=\"LightGrey\",\n title={\"font\": x_font},\n mirror=True,\n ticks='outside',\n showline=True,\n linewidth=2,\n linecolor=\"black\"\n ),\n yaxis=dict(\n showgrid=True,\n gridcolor=\"LightGrey\",\n title=y_title,\n mirror=True,\n ticks='outside',\n showline=True,\n linewidth=2,\n linecolor=\"black\"\n )\n )\n try:\n os.makedirs(static_dir)\n except FileExistsError:\n print(\"file\" + static_dir + \"exists\\n\\n\\n\")\n # directory already exists\n try:\n os.makedirs(html_dir)\n except FileExistsError:\n print(\"file\" + html_dir + \"exists\\n\\n\\n\")\n\n plotly.offline.plot(fig_lat, filename=html_dir +'/'+ query + \"_P\" + str(py) + \"_\" + axis_x + '.html')\n fig_lat.write_image(static_dir +'/'+ query + \"_P\" + str(py) + \"_\" + axis_x + \".png\")\n\n\ndef display_chart_scaling_errorbar(query, codename):\n qps_fig_title = \"\"\n qps_fig_title = \"SolrCloud Query Throughput (Round Robin)\" if query == \"roundrobin\" else \"SolrCloud Query Throughput (SolrJ)\"\n lat_fig_title = \"\"\n lat_fig_title = \"SolrCloud Tail Latency (Round Robin)\" if query == \"roundrobin\" else \"SolrCloud Tail Latency (SolrJ)\"\n\n total_scale_file = totals_csv\n\n # ideal_path = JANUS_HOME + '/chart/scaling_exp_csvs/ideal_line_direct.csv'\n\n if query == \"client\":\n global module_root\n ideal_path = module_root+'/files/ideal_line.csv'\n\n df = pd.read_csv(total_scale_file)\n ideal_df = pd.read_csv(ideal_path)\n df_QPS = df.append(ideal_df)\n df_QPS = df_QPS.sort_values(\"clustersize\")\n engineList = df_QPS.engine.unique()\n qpsLineList = []\n for e in engineList:\n df_engine = df_QPS.loc[df_QPS['engine'] == e]\n group_array_QPS = df_engine.GROUP.unique()\n for gn in group_array_QPS:\n qpsLineList.append(LineData(gn, engine=e))\n # latLineList = [LineData(x) for x in group_array_TAIL]\n fillLineList(qpsLineList, df_QPS, \"QPS\")\n\n for i in qpsLineList:\n print(i)\n # fillLineList(latLineList, df, \"P95_latency(ms)\")\n\n # this creates the line data the lib needs and packs into a list\n qps_data_list = [x.setLine() for x in qpsLineList]\n # lat_data_list = [ x.setLine() for x in latLineList]\n\n fig_qps = go.Figure(qps_data_list)\n # fig_lat = go.Figure(lat_data_list)\n\n x_font = dict(\n size=20,\n color=\"#7f7f7f\"\n )\n axis_x = \"clustersize\"\n y_title = \"QPS\"\n fig_qps.update_layout(\n paper_bgcolor='rgba(255,255,255,255)',\n plot_bgcolor='rgba(255,255,255,255)',\n # title=lat_fig_title,\n xaxis_title=\"Cluster Size\",\n showlegend=True,\n xaxis=dict(\n showgrid=True,\n gridcolor=\"LightGrey\",\n title={\"font\": x_font},\n mirror=True,\n ticks='outside',\n showline=True,\n linewidth=2,\n linecolor=\"black\"\n ),\n yaxis=dict(\n 
showgrid=True,\n gridcolor=\"LightGrey\",\n title=y_title,\n mirror=True,\n ticks='outside',\n showline=True,\n linewidth=2,\n linecolor=\"black\"\n )\n )\n\n # fig_qps.show()\n # fig_lat.show()\n\n try:\n os.makedirs(static_dir)\n except FileExistsError:\n print(\"file\" + static_dir + \"exists\\n\\n\\n\")\n # directory already exists\n try:\n os.makedirs(html_dir)\n except FileExistsError:\n print(\"file\" + html_dir + \"exists\\n\\n\\n\")\n\n plotly.offline.plot(fig_qps, filename=html_dir + '/' + query + \"_\" + y_title + \"_\" + axis_x + '.html')\n fig_qps.write_image(static_dir + '/' + query + \"_\" + y_title + \"_\" + axis_x + \".png\")\n\n\ndef cdf_TAIL(query, codename):\n\n total_scale_file = totals_csv\n df = pd.read_csv(total_scale_file)\n df = df.sort_values(\"parallel_requests\")\n pr_unique = df.parallel_requests.unique()\n for chart in pr_unique:\n cdf_fig_title = \"SolrCloud LOAD=\" + str(\n chart) + \" TAIL CDF (Round Robin)\" if query == \"direct\" else \"SolrCloud \" + str(chart) + \" TAIL CDF (SolrJ)\"\n\n cdfLineList = []\n df_pr = df.loc[df['parallel_requests'] == chart]\n for index, row in df_pr.iterrows():\n # there will be a line for each row in the csv\n # def __init__(self, gn, csize=0, load=0):\n gn = row[\"GROUP\"]\n load = row[\"parallel_requests\"]\n csize = row[\"clustersize\"]\n engine = row[\"engine\"]\n cdfLineList.append(LineData(gn, csize, load, engine))\n\n cdfFillLineList(cdfLineList, df_pr)\n\n cdf_data_list = [x.setTailLine() for x in cdfLineList]\n\n fig_cdf = go.Figure(cdf_data_list)\n\n # latLineList = [LineData(gn) for gn in group_array]\n yaxis_title = \"CDF(x) at LOAD = \" + str(chart) + \"connections\"\n xaxis_title = \"Latency (ms)\"\n\n x_font = dict(\n size=20,\n color=\"#7f7f7f\"\n )\n\n fig_cdf.update_layout(\n paper_bgcolor='rgba(255,255,255,255)',\n plot_bgcolor='rgba(255,255,255,255)',\n # title=lat_fig_title,\n xaxis_title=xaxis_title,\n showlegend=True,\n xaxis=dict(\n showgrid=True,\n gridcolor=\"LightGrey\",\n title={\"font\": x_font},\n mirror=True,\n ticks='outside',\n showline=True,\n linewidth=2,\n linecolor=\"black\"\n ),\n yaxis=dict(\n showgrid=True,\n gridcolor=\"LightGrey\",\n title=yaxis_title,\n mirror=True,\n ticks='outside',\n showline=True,\n linewidth=2,\n linecolor=\"black\"\n )\n )\n\n try:\n os.makedirs(static_dir)\n except FileExistsError:\n print(\"file\" + static_dir + \"exists\\n\\n\\n\")\n # directory already exists\n try:\n os.makedirs(html_dir)\n except FileExistsError:\n print(\"file\" + html_dir + \"exists\\n\\n\\n\")\n plotly.offline.plot(fig_cdf, filename=html_dir + '/'+ query + \"_\" + str(chart) + '.html')\n fig_cdf.write_image(static_dir + '/'+query + \"_\" + str(chart) + \".png\")\n\n\nif __name__ == \"__main__\":\n print(\" ***** FINAL STEP: PLOTTING CHART IN BROWSER ******\")\n\n args = sys.argv[1:]\n\n arg_dict = {args[x]: args[x + 1] for x in range(0, len(args) - 1) if x % 2 == 0}\n\n _query = arg_dict.get('--query')\n _experiment_name = arg_dict.get('--experiment_name')\n home_path = arg_dict.get('--home_path')\n chart_type= arg_dict.get('--chart_type')\n m_root= arg_dict.get('--module_root')\n # quick hack to pass in value to legacy global variable\n setGlobalDataPath(home_path,_experiment_name,chart_type,m_root)\n\n cdf_TAIL(_query, _experiment_name)\n\n print(\" ***** FINAL STEP COMPLETE ******\")\n\n 
sys.exit()\n","sub_path":"jmods/viz/cdf/files/generate_figures.py","file_name":"generate_figures.py","file_ext":"py","file_size_in_byte":18937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"402305345","text":"#!/usr/bin/env vpython\n# Copyright 2014 The LUCI Authors. All rights reserved.\n# Use of this source code is governed under the Apache License, Version 2.0\n# that can be found in the LICENSE file.\n\nimport json\nimport os\nimport shutil\nimport subprocess\nimport tempfile\nimport unittest\n\nimport repo_test_util\nfrom repo_test_util import ROOT_DIR\n\n\nclass RecipeRepo(object):\n def __init__(self, recipes_path=''):\n self._root = tempfile.mkdtemp()\n os.makedirs(os.path.join(self._root, 'infra', 'config'))\n self._recipes_cfg = os.path.join(\n self._root, 'infra', 'config', 'recipes.cfg')\n with open(self._recipes_cfg, 'w') as fh:\n json.dump({\n 'api_version': 2,\n 'project_id': 'testproj',\n 'recipes_path': recipes_path,\n 'deps': {\n 'recipe_engine':{\n 'url': ROOT_DIR,\n 'branch': 'master',\n 'revision': 'HEAD'\n }\n }\n }, fh)\n self._recipes_dir = os.path.join(self._root, 'recipes')\n os.mkdir(self._recipes_dir)\n self._modules_dir = os.path.join(self._root, 'recipe_modules')\n os.mkdir(self._modules_dir)\n\n def make_recipe(self, recipe, contents):\n with open(os.path.join(self._recipes_dir, '%s.py' % recipe), 'w') as fh:\n fh.write(contents)\n\n def make_module(self, name, init_contents, api_contents):\n module_root = os.path.join(self._modules_dir, name)\n os.mkdir(module_root)\n with open(os.path.join(module_root, '__init__.py'), 'w') as fh:\n fh.write(init_contents)\n with open(os.path.join(module_root, 'api.py'), 'w') as fh:\n fh.write(api_contents)\n\n @property\n def recipes_cmd(self):\n return [\n os.path.join(ROOT_DIR, 'recipes.py'),\n '--package', self._recipes_cfg,\n '-O', 'recipe_engine=%s' % ROOT_DIR]\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n shutil.rmtree(self._root)\n\nclass ErrorsTest(unittest.TestCase):\n def _test_cmd(self, repo, cmd, asserts=None, retcode=0, engine_args=None):\n engine_args = engine_args or []\n if cmd[0] == 'run':\n _, path = tempfile.mkstemp('result_pb')\n cmd = [cmd[0]] + ['--output-result-json', path] + cmd[1:]\n\n try:\n subp = subprocess.Popen(\n repo.recipes_cmd + engine_args + cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = subp.communicate()\n\n if asserts:\n asserts(stdout, stderr)\n self.assertEqual(\n subp.returncode, retcode,\n '%d != %d.\\nstdout:\\n%s\\nstderr:\\n%s' % (\n subp.returncode, retcode, stdout, stderr))\n\n if cmd[0] == 'run':\n if not os.path.exists(path):\n return\n\n with open(path) as tf:\n raw = tf.read()\n data = None\n if raw:\n data = json.loads(raw)\n return data\n finally:\n if cmd[0] == 'run':\n if os.path.exists(path):\n os.unlink(path)\n\n def test_missing_dependency(self):\n with RecipeRepo() as repo:\n repo.make_recipe('foo', \"\"\"\nDEPS = ['aint_no_thang']\n\"\"\")\n\n def assert_nomodule(stdout, stderr):\n self.assertRegexpMatches(\n stdout + stderr, r'No module named aint_no_thang')\n\n self._test_cmd(\n repo, ['run', 'foo'], retcode=1, asserts=assert_nomodule)\n\n def test_missing_module_dependency(self):\n with RecipeRepo() as repo:\n repo.make_recipe('foo', 'DEPS = [\"le_module\"]')\n repo.make_module('le_module', 'DEPS = [\"love\"]', '')\n\n def assert_nomodule(stdout, stderr):\n self.assertRegexpMatches(stdout + stderr, r'No module named love')\n\n self._test_cmd(\n repo, 
['run', 'foo'], retcode=1, asserts=assert_nomodule)\n\n def test_no_such_recipe(self):\n with RecipeRepo() as repo:\n result = self._test_cmd(\n repo, ['run', 'nooope'], retcode=1)\n self.assertIsNotNone(result['failure']['exception'])\n\n def test_syntax_error(self):\n with RecipeRepo() as repo:\n repo.make_recipe('foo', \"\"\"\nDEPS = [ (sic)\n\"\"\")\n\n def assert_syntaxerror(stdout, stderr):\n self.assertRegexpMatches(stdout + stderr, r'SyntaxError')\n\n self._test_cmd(repo, ['test', 'run', '--filter', 'foo'],\n asserts=assert_syntaxerror, retcode=1)\n self._test_cmd(repo, ['test', 'train', '--filter', 'foo'],\n asserts=assert_syntaxerror, retcode=1)\n self._test_cmd(repo, ['run', 'foo'],\n asserts=assert_syntaxerror, retcode=1)\n\n def test_missing_path(self):\n with RecipeRepo() as repo:\n repo.make_recipe('missing_path', \"\"\"\nDEPS = ['recipe_engine/step', 'recipe_engine/path']\n\ndef RunSteps(api):\n api.step('do it, joe', ['echo', 'JOE'], cwd=api.path['bippityboppityboo'])\n\ndef GenTests(api):\n yield api.test('basic')\n\"\"\")\n def assert_keyerror(stdout, stderr):\n self.assertRegexpMatches(\n stdout + stderr, r\"KeyError: 'Unknown path: bippityboppityboo'\",\n stdout + stderr)\n\n self._test_cmd(repo, ['test', 'train', '--filter', 'missing_path'],\n asserts=assert_keyerror, retcode=1)\n self._test_cmd(repo, ['test', 'run', '--filter', 'missing_path'],\n asserts=assert_keyerror, retcode=1)\n self._test_cmd(repo, ['run', 'missing_path'],\n asserts=assert_keyerror, retcode=1)\n\n def test_engine_failure(self):\n with RecipeRepo() as repo:\n repo.make_recipe('print_step_error', \"\"\"\nDEPS = ['recipe_engine/step']\n\nfrom recipe_engine import step_runner\n\ndef bad_print_step(self, step_stream, step, env):\n raise Exception(\"Buh buh buh buh bad to the bone\")\n\ndef GenTests(api):\n pass\n\ndef RunSteps(api):\n step_runner.SubprocessStepRunner._print_step = bad_print_step\n try:\n api.step('Be good', ['echo', 'Sunshine, lollipops, and rainbows'])\n finally:\n api.step.active_result.presentation.status = 'WARNING'\n\"\"\")\n self._test_cmd(repo, ['run', 'print_step_error'],\n asserts=lambda stdout, stderr: self.assertRegexpMatches(\n stdout + stderr,\n r'(?s)Recipe engine bug.*Buh buh buh buh bad to the bone'),\n retcode=2)\n\n def test_missing_method(self):\n with RecipeRepo() as repo:\n repo.make_recipe('no_gen_tests', \"\"\"\ndef RunSteps(api):\n pass\n\"\"\")\n repo.make_recipe('no_run_steps', \"\"\"\ndef GenTests(api):\n pass\n\"\"\")\n\n self._test_cmd(repo, ['run', 'no_gen_tests'],\n asserts=lambda stdout, stderr: self.assertRegexpMatches(\n stdout + stderr,\n r'(?s)misspelled GenTests'),\n retcode=1)\n\n self._test_cmd(repo, ['run', 'no_run_steps'],\n asserts=lambda stdout, stderr: self.assertRegexpMatches(\n stdout + stderr,\n r'(?s)misspelled RunSteps'),\n retcode=1)\n\n def test_unconsumed_assertion(self):\n # There was a regression where unconsumed exceptions would not be detected\n # if the exception was AssertionError.\n\n with RecipeRepo() as repo:\n repo.make_recipe('unconsumed_assertion', \"\"\"\nDEPS = []\n\ndef RunSteps(api):\n pass\n\ndef GenTests(api):\n yield api.test('basic') + api.expect_exception('AssertionError')\n\"\"\")\n self._test_cmd(repo, [\n 'test', 'train', '--filter', 'unconsumed_assertion'],\n asserts=lambda stdout, stderr: self.assertRegexpMatches(\n stdout + stderr, 'Unconsumed'),\n retcode=1)\n\n def test_run_recipe_help(self):\n with RecipeRepo(recipes_path='foo/bar') as repo:\n repo.make_recipe('do_nothing', \"\"\"\nDEPS = []\ndef 
RunSteps(api):\n pass\n\"\"\")\n subp = subprocess.Popen(\n repo.recipes_cmd + ['run', 'do_nothing'],\n stdout=subprocess.PIPE)\n stdout, _ = subp.communicate()\n self.assertRegexpMatches(\n stdout, r'from the root of a \\'testproj\\' checkout')\n self.assertRegexpMatches(\n stdout, r'\\./foo/bar/recipes\\.py run .* do_nothing')\n\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"unittests/errors_test.py","file_name":"errors_test.py","file_ext":"py","file_size_in_byte":7895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"258637396","text":"\n\nfrom xai.brain.wordbase.nouns._gale import _GALE\n\n#class header\nclass _GALES(_GALE, ):\n\tdef __init__(self,): \n\t\t_GALE.__init__(self)\n\t\tself.name = \"GALES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"gale\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_gales.py","file_name":"_gales.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"408566036","text":"# summ_with_recursion.py\n\n### Source: Mark Lutz, Learning Python, 5th edition\n\n\nalist = [1, 2, 3, 4, 5, 7, 8, 9, 10]\n\ndef mysum(L):\n\tif not L:\n\t\treturn 0\n\telse:\n\t\treturn L[0] + mysum(L[1:]) # Call myself recursively\n\n\nprint(mysum(alist))\n\n\n\nalist = [1, 2, 3, 4, 5, 7, 8, 9, 10]\n\ndef mysum2(L):\n\tprint(L) # Allows to see recursiveness\n\tif not L:\n\t\treturn 0\n\telse:\n\t\treturn L[0] + mysum2(L[1:]) # Call myself recursively\n\n\ndef mysum_tern(L):\n\treturn 0 if not L else L[0] + mysum_tern(L[1:]) # Use ternary expression\n\n\ndef mysum3(L):\n\treturn L[0] if len(L) == 1 else L[0] + mysum3(L[1:]) # Any type, assume one\n\n\nprint(mysum(alist))\nprint(mysum2(alist))\nprint(mysum_tern(alist))\nprint(mysum3(alist))\n\n\n### loop vs recursion \n\nsum = 0\nwhile alist:\n\tsum += alist[0]\n\talist = alist[1:]\n\nprint(\"Iterative way sum list elements:\", sum)\n\n\n#end_sum = 0\n#for x in alist: end_sum += x\n#print(\"Iterative comprehension way:\", end_sum)\n\n\"\"\"\nHandling Arbitrary Structures\n\nOn the other hand, recursion—or equivalent explicit stack-based algorithms we’ll\nmeet shortly—can be required to traverse arbitrarily shaped structures. As a simple\nexample of recursion’s role in this context, consider the task of computing the sum of\nall the numbers in a nested sublists structure like this:\n[1, [2, [3, 4], 5], 6, [7, 8]] # Arbitrarily nested sublists\nSimple looping statements won’t work here because this is not a linear iteration. Nested\nlooping statements do not suffice either, because the sublists may be nested to\narbitrary depth and in an arbitrary shape—there’s no way to know how many nested\nloops to code to handle all cases. 
Instead, the following code accommodates such general\nnesting by using recursion to visit sublists along the way:\n\"\"\"\n\ndef sumtree(L):\n\ttot = 0\n\tfor x in L: # For each item at this level\n\t\tif not isinstance(x, list):\n\t\t\ttot += x # Add numbers directly\n\t\telse:\n\t\t\ttot += sumtree(x) # Recur for sublists\n\treturn tot\n\nL = [1, [2, [3, 4], 5], 6, [7, 8]] # Arbitrary nesting\n\nprint(sumtree(L)) # Prints 36\n# Pathological cases\nprint(\"(right-heavy)\", sumtree([1, [2, [3, [4, [5]]]]])) # Prints 15 (right-heavy)\nprint(\"(left-heavy)\", sumtree([[[[[1], 2], 3], 4], 5])) # Prints 15 (left-heavy)\n\n\n\"\"\"\nRecursion versus queues and stacks\nIt sometimes helps to understand that internally, Python implements recursion by\npushing information on a call stack at each recursive call, so it remembers where it\nmust return and continue later. In fact, it’s generally possible to implement recursivestyle\nprocedures without recursive calls, by using an explicit stack or queue of your\nown to keep track of remaining steps.\nFor instance, the following computes the same sums as the prior example, but uses an\nexplicit list to schedule when it will visit items in the subject, instead of issuing recursive\ncalls; the item at the front of the list is always the next to be processed and summed:\n\n\"\"\"\n\n\ndef sumtree_one(L): # Breadth-first, explicit queue\n\ttot = 0\n\titems = list(L) # Start with copy of top level\n\twhile items:\n\t\tfront = items.pop(0) # Fetch/delete front item\n\t\tif not isinstance(front, list):\n\t\t\ttot += front # Add numbers directly\n\t\telse:\n\t\t\titems.extend(front) # <== Append all in nested list\n\treturn tot\n\n\nprint(\"Summtree 1:\", sumtree(alist))\n\ndef sumtree_two(L): # Depth-first, explicit stack\n\ttot = 0\n\titems = list(L) # Start with copy of top level\n\twhile items:\n\t\tfront = items.pop(0) # Fetch/delete front item\n\t\tif not isinstance(front, list):\n\t\t\ttot += front # Add numbers directly\n\t\telse:\n\t\t\titems[:0] = front # <== Prepend all in nested list\n\treturn tot\n\nprint(\"Summtree_one:\", sumtree_one(alist))\nprint(\"Summtree_two:\", sumtree_two(alist))\n\n\"\"\"\nFor more on the last two examples (and another variant), see file sumtree2.py in the\nbook’s examples. It adds items list tracing so you can watch it grow in both schemes,\nand can show numbers as they are visited so you see the search order. For instance,\nthe breadth-first and depth-first variants visit items in the same three test lists used\nfor the recursive version in the following orders, respectively (sums are shown last):\n\n\nIn general, though, once you get the hang of recursive calls, they are more natural\nthan the explicit scheduling lists they automate, and are generally preferred unless\nyou need to traverse structure in specialized ways. Some programs, for example, perform\na best-first search that requires an explicit search queue ordered by relevance or\nother criteria. 
If you think of a web crawler that scores pages visited by content, the\napplications may start to become clearer.\t\n\"\"\"\t\n\n\n\"\"\"\nCycles, paths, and stack limits\nAs is, these programs suffice for our example, but larger recursive applications can\nsometimes require a bit more infrastructure than shown here: they may need to avoid\ncycles or repeats, record paths taken for later use, and expand stack space when using\nrecursive calls instead of explicit queues or stacks.\nFor instance, neither the recursive call nor the explicit queue/stack examples in this\nsection do anything about avoiding cycles—visiting a location already visited. That’s\nnot required here, because we’re traversing strictly hierarchical list object trees. If data\ncan be a cyclic graph, though, both these schemes will fail: the recursive call version\nwill fall into an infinite recursive loop (and may run out of call-stack space), and the\nothers will fall into simple infinite loops, re-adding the same items to their lists (and\nmay or may not run out of general memory). Some programs also need to avoid\nrepeated processing for a state reached more than once, even if that wouldn’t lead to a\nloop\n\nTo do better, the recursive call version could simply keep and pass a set, dictionary, or\nlist of states visited so far and check for repeats as it goes. We will use this scheme in\nlater recursive examples in this book:\nif state not in visited:\nvisited.add(state) # x.add(state), x[state]=True, or x.append(state)\n...proceed...\nThe nonrecursive alternatives could similarly avoid adding states already visited with\ncode like the following. Note that checking for duplicates already on the items list\nwould avoid scheduling a state twice, but would not prevent revisiting a state traversed\nearlier and hence removed from that list:\nvisited.add(front)\n...proceed...\nitems.extend([x for x in front if x not in visited])\nThis model doesn’t quite apply to this section’s use case that simply adds numbers in\nlists, but larger applications will be able to identify repeated states—a URL of a previously\nvisited web page, for instance. In fact, we’ll use such techniques to avoid cycles\nand repeats in later examples listed in the next section.\nSome programs may also need to record complete paths for each state followed so\nthey can report solutions when finished. In such cases, each item in the nonrecursive\nscheme’s stack or queue may be a full path list that suffices for a record of states visited,\nand contains the next item to explore at either end.\nAlso note that standard Python limits the depth of its runtime call stack—crucial to\nrecursive call programs—to trap infinite recursion errors. To expand it, use the sys\nmodule:\n>>> sys.getrecursionlimit() # 1000 calls deep default\n1000\n>>> sys.setrecursionlimit(10000) # Allow deeper nesting\n>>> help(sys.setrecursionlimit) # Read more about it\nThe maximum allowed setting can vary per platform. This isn’t required for programs\nthat use stacks or queues to avoid recursive calls and gain more control over the traversal\nprocess\n\n\"\"\"\n\n\"\"\"\nMore recursion examples\nAlthough this section’s example is artificial, it is representative of a larger class of programs;\ninheritance trees and module import chains, for example, can exhibit similarly\ngeneral structures, and computing structures such as permutations can require arbitrarily\nmany nested loops. 
In fact, we will use recursion again in such roles in more\nrealistic examples later in this book:\n• In Chapter 20’s permute.py, to shuffle arbitrary sequences\n• In Chapter 25’s reloadall.py, to traverse import chains\nIn Chapter 29’s classtree.py, to traverse class inheritance trees\n• In Chapter 31’s lister.py, to traverse class inheritance trees again\n• In Appendix D’s solutions to two exercises at the end of this part of the book:\ncountdowns and factorials\nThe second and third of these will also detect states already visited to avoid cycles and\nrepeats. Although simple loops should generally be preferred to recursion for linear\niterations on the grounds of simplicity and efficiency, we’ll find that recursion is\nessential in scenarios like those in these later examples.\n\n\n\"\"\"\n","sub_path":"Recursion/sum_with_recursion.py","file_name":"sum_with_recursion.py","file_ext":"py","file_size_in_byte":8694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"371050510","text":"data = {\n 'name': 'CilWB',\n 'nickname': 'Cil',\n 'image': 'rabbit.gif',\n 'greeting_msg': 'Hi, there!',\n 'about': \"\"\"\n Cil is a Cil ,and also is Xil or Zil. He has some skills about C and C++. ***just only some***\n \"\"\",\n 'work': {\n 'position': 'Sleeper',\n 'at': 'everwhere'\n },\n 'courses': [\n {\n 'name': 'How to eat like a blackhole',\n 'school': 'CilCil Academy',\n 'url': 'it\\'s a darkweb,'\n },\n {\n 'name': 'POSN BUU CAMP2',\n 'school': 'Burapha university',\n 'url': 'adv-nav'\n }\n ],\n 'skills': [\n 'Eating',\n 'Sleeping'\n ]\n}\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"304445578","text":"#!/usr/bin/python3\n#-*- encode:utf-8 -*-\n\nimport cv2\nimport sys, os, time\nimport skvideo.io\nimport subprocess\nimport pathlib\nimport hashlib\nfrom PIL import ImageFont\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom threading import Thread\nfrom queue import Queue\nimport datetime\nimport io\nimport numpy as np\nimport platform\n\nif 'centos' in platform.dist():\n sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\n\nclass Util(object):\n def sec2str(sec):\n m, s = divmod(int(sec), 60)\n h, m = divmod(m, 60)\n return \"%d:%02d:%02d\" % (h, m, s)\n\n def get_time_str():\n return datetime.datetime.now().strftime(\"%Y%m%d%H%M\")\n\n def run_shell_cmd(cmd):\n subprocess.call(cmd, shell=True)\n\n def run_shell_path(path):\n subprocess.call('chmod +x ' + path, shell=True)\n subprocess.call(path, shell=True)\n\n def create_dir(directory):\n pathlib.Path(directory).mkdir(parents=True, exist_ok=True)\n\n def is_centos():\n return 'centos' in platform.dist()\n\n def is_ubuntu():\n return 'Ubuntu' in platform.dist()\n \n def is_mac():\n return sys.platform == 'darwin'\n\n def is_win():\n return sys.platform == 'win32'\n\n def is_linux():\n return sys.platform == 'linux'\n\n def use_cv2_video_func():\n return Util.is_mac() or Util.is_win()\n # return False # tlinux's cv2.VideoCapture dont work\n \n def get_fps(video_path):\n ret = None\n if Util.use_cv2_video_func():\n cap = cv2.VideoCapture(video_path)\n ret = int(cap.get(cv2.CAP_PROP_FPS))\n cap.release()\n else:\n info = skvideo.io.ffprobe(video_path)\n if 'video' in info and '@r_frame_rate' in info['video']:\n ret = int(eval(info['video']['@r_frame_rate']))\n else:\n ret = 0\n return ret\n\n def get_video_sec(video_path):\n result = 
subprocess.Popen([\"ffprobe\", video_path],\n stdout = subprocess.PIPE, stderr = subprocess.STDOUT)\n for line in result.stdout.readlines():\n s = str(line)\n if 'Duration' in s:\n ts = s.split(',')[0].split(': ')[1]\n print(ts)\n pt = datetime.datetime.strptime(ts, '%H:%M:%S.%f')\n n_sec = pt.second + pt.minute * 60 + pt.hour * 3600\n return n_sec\n return 0\n\n def loop_image_dir(directory, looper, fps, per_sec):\n '''\n @return n_frame\n '''\n i_frame = 0\n for filename in os.listdir(directory):\n if filename.endswith(filename):\n path = os.path.join(directory, filename)\n img = cv2.imread(path)\n i_sec = int(i_frame / fps)\n if looper: looper(img, i_frame, i_sec)\n i_frame += 1 * per_sec * fps\n return i_frame\n\n def get_i_sec_frame(video_path, i_sec):\n stream = cv2.VideoCapture(video_path) if Util.use_cv2_video_func() else skvideo.io.vreader(video_path)\n fps = Util.get_fps(video_path)\n if fps == 0: \n return None\n i = 0\n while True:\n if Util.use_cv2_video_func():\n grabbed, frame = stream.read()\n if not grabbed: \n break\n if i / fps == i_sec:\n return frame\n stream.release()\n i += 1\n else:\n frame = next(stream, None)\n if frame is None:\n break\n frame = frame[...,::-1] # RGB -> BGR\n if i / fps == i_sec:\n return frame\n i += 1\n if Util.use_cv2_video_func():\n stream.release()\n return None\n\n # not fast thought\n def fast_loop_frame(video_path, looper, per_sec, max_sec):\n '''\n @return n_frame\n '''\n i_frame = 0\n fps = Util.get_fps(video_path)\n fvs = FileVideoStream(video_path, queueSize=4096).start()\n time.sleep(5.0)\n while fvs.more():\n time.sleep(0.001)\n frame = fvs.read()\n if i_frame % fps == 0 and (i_frame / fps) % per_sec == 0:\n i_sec = int(i_frame / fps)\n if looper: looper(frame, i_frame, i_sec)\n if (i_frame / fps) > max_sec:\n break\n i_frame += 1\n return i_frame\n\n def loop_frame(video_path, looper, per_sec, max_sec):\n '''\n @return n_frame\n '''\n i_frame = 0\n fps = Util.get_fps(video_path)\n stream = cv2.VideoCapture(video_path) if Util.use_cv2_video_func() else skvideo.io.vreader(video_path)\n while True:\n frame = None\n if Util.use_cv2_video_func():\n grabbed, frame = stream.read()\n if not grabbed:\n break\n else:\n frame = next(stream, None)\n if frame is None:\n break\n frame = frame[...,::-1] # RGB -> BGR\n if i_frame % fps == 0 and (i_frame / fps) % per_sec == 0:\n i_sec = int(i_frame / fps)\n if looper: looper(frame, i_frame, i_sec)\n if (i_frame / fps) > max_sec:\n break\n i_frame += 1\n\n if Util.use_cv2_video_func(): \n stream.release()\n return i_frame\n\n def plot_keras_history(history, plot_path, log_path, model):\n log_path = log_path.replace('detail.txt', 'acc{0:.2f}.txt'.format(history.history['acc'][-1]))\n with open(log_path, 'w') as f:\n for key in ['val_acc', 'acc', 'val_loss', 'loss']:\n try:\n f.write('\\n{}='.format(key))\n nums = history.history[key]\n f.write(','.join(list(map(lambda x:'%.4f' % x, nums))))\n except Exception as e:\n pass\n if Util.is_linux():\n return\n import matplotlib.pyplot as plt\n fig = plt.figure()\n fig.add_subplot(2,2,1)\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'valid'], loc='upper left')\n axes = plt.gca()\n axes.set_ylim([0, 1])\n\n # summarize history for loss\n fig.add_subplot(2,2,2)\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 
'valid'], loc='upper left')\n plt.savefig(plot_path)\n\n def plot_image(image):\n if Util.is_linux():\n return\n import matplotlib.pyplot as plt\n plt.imshow(image)\n plt.show()\n\n def plot_dbs(plot_path, dbs, db_max):\n if Util.is_linux():\n return\n import matplotlib\n import matplotlib.pyplot as plt\n\n n = len(dbs)\n fig, ax = plt.subplots(figsize=(n / 60 * 5, 2.5))\n ax.set_xticks(np.arange(0, n, 10))\n formatter = matplotlib.ticker.FuncFormatter(lambda sec, x: time.strftime('%M:%S', time.gmtime(sec)))\n ax.xaxis.set_major_formatter(formatter)\n x1 = np.arange(0, n)\n y1 = dbs\n x2 = np.arange(0, n)\n y2 = db_max * np.ones((n))\n ax.plot(x1, y1, 'b-', x2, y2, 'r-')\n ax.set_ylabel('dB')\n ax.set_xlabel('sec')\n ax.set_ylim((-120, 0))\n ax.set_xlim((0, n))\n fig.savefig(plot_path)\n\n def rect_smoba_killinfo(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 70 / 480)\n y2 = int(w * 120 / 480)\n x1 = int(h * 300 / 848)\n x2 = int(h * 548 / 848)\n return x1, y1, x2, y2\n\n def rect_smoba_skill_1(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 385 / 480)\n y2 = int(w * 455 / 480)\n x1 = int(h * 600 / 848)\n x2 = int(h * 670 / 848)\n return x1, y1, x2, y2\n\n def rect_smoba_middle_hero(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 190 / 480)\n y2 = int(w * 300 / 480)\n x1 = int(h * 390 / 848)\n x2 = int(h * 460 / 848)\n return x1, y1, x2, y2\n\n def rect_smoba_grid_hero(image, i, j):\n x1, y1, x2, y2 = Util.rect_middle_hero(image)\n dx = i * (x2 - x1)\n dy = j * (y2 - y1)\n return x1 + dx, y1 + dy, x2 + dx, y2 + dy\n\n def rect_pubg_killinfo(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 300 / 450)\n y2 = int(w * 340 / 450)\n x1 = int(h * 300 / 800) \n x2 = int(h * 500 / 800)\n return x1, y1, x2, y2\n\n def rect_pubg_gun(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 380 / 450)\n y2 = int(w * 420 / 450)\n x1 = int(h * 360 / 800)\n x2 = int(h * 440 / 800)\n return x1, y1, x2, y2\n\n def rect_pubg_win(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 0 / 450)\n y2 = int(w * 100 / 450)\n x1 = int(h * 600 / 800) \n x2 = int(h * 800 / 800)\n return x1, y1, x2, y2\n \n def rect_pubg_team(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 350 / 450)\n y2 = int(w * 450 / 450)\n x1 = int(h * 0 / 800) \n x2 = int(h * 100 / 800)\n return x1, y1, x2, y2\n\n def rect_pubg_screen(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = 0\n y2 = int(w)\n x1 = 0\n x2 = int(h)\n return x1, y1, x2, y2\n\n def rect_speedm_drift_button(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 225 / 340)\n y2 = int(w * 305 / 340)\n x1 = int(h * 490 / 605) \n x2 = int(h * 570 / 605)\n return x1, y1, x2, y2\n\n def rect_speedm_drift_info(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 260 / 340)\n y2 = int(w * 290 / 340)\n x1 = int(h * 310 / 605) \n x2 = int(h * 430 / 605)\n return x1, y1, x2, y2\n\n def rect_speedm_record(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 20 / 340)\n y2 = int(w * 100 / 340)\n x1 = int(h * 230 / 605) \n x2 = int(h * 380 / 605)\n return x1, y1, x2, y2\n\n def rect_speedm_win(image):\n w = image.shape[0]\n h = image.shape[1]\n y1 = int(w * 70 / 340)\n y2 = int(w * 190 / 340)\n x1 = int(h * 330 / 605) \n x2 = int(h * 605 / 605)\n return x1, y1, x2, y2\n\n # height, width, channel\n def input_shape_speedm_drift_button():\n return (80, 80, 3)\n\n def input_shape_speedm_drift_info():\n return (30, 120, 3)\n\n def 
input_shape_speedm_record():\n return (80, 150, 3)\n\n def input_shape_speedm_win():\n return (120, 275, 3)\n\n def input_shape_pubg_killinfo():\n return (40, 200, 3)\n\n def input_shape_pubg_gun():\n return (50, 100, 3)\n \n def input_shape_pubg_win():\n return (50, 100, 3)\n \n def input_shape_pubg_team():\n return (100, 100, 3)\n \n def input_shape_pubg_screen():\n return (45, 80, 3)\n\n def input_shape_smoba_skill_1():\n return (50, 50, 3)\n\n def input_shape_smoba_hero():\n return (90, 70, 3)\n\n def crop_and_resize(image, rect_func, size):\n x1, y1, x2, y2 = rect_func(image)\n image = image[y1:y2, x1:x2]\n return cv2.resize(image, size)\n\n def crop_smoba_skill_1(image):\n return Util.crop_and_resize(image, Util.rect_skill_1, size=(50, 50))\n\n def crop_smoba_middle_hero(image):\n return Util.crop_and_resize(image, Util.rect_middle_hero, size=(70, 90))\n\n def crop_smoba_grid_hero(image, i, j):\n return Util.crop_and_resize(image, Util.rect_grid_hero, size=(70, 90))\n\n def crop_pubg_killinfo(image):\n return Util.crop_and_resize(image, Util.rect_pubg_killinfo, size=(200, 40))\n\n def crop_pubg_gun(image):\n return Util.crop_and_resize(image, Util.rect_pubg_gun, size=(200, 100))\n\n def crop_pubg_win(image):\n return Util.crop_and_resize(image, Util.rect_pubg_win, size=(100, 50))\n\n def crop_pubg_team(image):\n return Util.crop_and_resize(image, Util.rect_pubg_team, size=(100, 100))\n\n def crop_pubg_screen(image):\n return Util.crop_and_resize(image, Util.rect_pubg_screen, size=(160, 90))\n\n def crop_speedm_drift_button(image):\n return Util.crop_and_resize(image, Util.rect_speedm_drift_button, size=(80, 80))\n\n def crop_speedm_drift_info(image):\n return Util.crop_and_resize(image, Util.rect_speedm_drift_info, size=(120, 30))\n\n def relpath(file, path):\n return os.path.join(os.path.dirname(file), path)\n\n def get_file_paths(directory, suffixes):\n paths = []\n for filename in os.listdir(directory):\n for suffix in suffixes:\n if filename.endswith(suffix):\n paths.append(os.path.join(directory, filename))\n return paths\n\n def create_watermark(text, path, video_path):\n # 灰底黑字\n font_size = 40\n pad_top = pad_bottom = 5\n pad_left = pad_right = 10\n font = ImageFont.truetype(\"./resource/font/SourceHanSansCN-Bold.otf\", font_size)\n w0, h0 = (600, font_size * 3)\n img = Image.new(\"RGBA\", (w0, h0), (0, 0, 0, 0))\n draw = ImageDraw.Draw(img)\n textsize = draw.multiline_textsize(text, font)\n rect_x0 = 0\n rect_y0 = 0\n rect_x1 = pad_left + pad_right + textsize[0]\n rect_y1 = pad_top + pad_bottom + textsize[1]\n draw.rectangle([rect_x0, rect_y0, rect_x1, rect_y1], fill=(0,0,0,140), outline=None)\n draw.text((pad_left, pad_top), text, fill=(255,255,255,255), font=font)\n\n # rescale\n frame = Util.get_i_sec_frame(video_path, i_sec=1)\n if frame is not None:\n w1, h1, _ = frame.shape\n if 0 < h1 < 1920:\n scale = h1 / 1920\n img.thumbnail((int(w0 * scale), int(h0 * scale)), Image.ANTIALIAS)\n img.save(path)\n \n # 白字\n # font_size = 40\n # font = ImageFont.truetype(\"./resource/font/SourceHanSansCN-Bold.otf\", font_size)\n # img = Image.new(\"RGBA\", (600, font_size * 3), (0, 0, 0, 0))\n # draw = ImageDraw.Draw(img)\n # draw.text((0, 0), text, (221,221,221), font=font)\n # draw = ImageDraw.Draw(img)\n # img.save(path)\n #\n # 灰底深灰字\n # font_size = 40\n # font = ImageFont.truetype(\"./resource/font/SourceHanSansCN-Bold.otf\", font_size)\n # img = Image.new(\"RGBA\", (600, font_size * 3), (0, 0, 0, 0))\n # draw = ImageDraw.Draw(img)\n # textsize = draw.multiline_textsize(text, 
font)\n # draw.rectangle([(0, 0), textsize], fill=(180,180,180,150), outline=None)\n # draw.text((0, 0), text, fill=(80,80,80,200), font=font)\n # img.save(path)\n\n def get_md5(text_li):\n h = hashlib.md5()\n for text in text_li:\n h.update(text.encode('utf-8'))\n return h.hexdigest()\n\nclass FileVideoStream:\n def __init__(self, path, queueSize):\n self.stream = cv2.VideoCapture(path) if Util.use_cv2_video_func() else skvideo.io.vreader(path)\n self.stopped = False\n self.queue = Queue(maxsize=queueSize)\n\n def start(self):\n t = Thread(target=self.update, args=())\n t.daemon = True\n t.start()\n return self\n\n def update(self):\n # keep looping infinitely\n while True:\n if self.stopped:\n return\n\n if not self.queue.full():\n frame = None\n if Util.use_cv2_video_func():\n grabbed, frame = self.stream.read()\n if not grabbed:\n self.stop()\n return\n else:\n frame = next(self.stream, None)\n if frame is None:\n self.stop()\n frame = frame[...,::-1] # RGB -> BGR\n self.queue.put(frame)\n\n def read(self):\n return self.queue.get()\n\n def more(self):\n return self.queue.qsize() > 0 or not self.stopped\n\n def stop(self):\n self.stopped = True\n if Util.use_cv2_video_func():\n self.stream.release()\n\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":16605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"487656443","text":"class Node:\n\tdef __init__(self, value):\n\t\tself.visited = False\n\t\tself.value = value\n\t\tself.neighbors = []\n\n\tdef add_neighbor(self, node):\n\t\tself.neighbors.append(node)\n\t\tnode.neighbors.append(self)\n\n\tdef delete_neighbor(self, node):\n\t\tself.neighbors.remove(node)\n\t\tnode.neighbors.remove(self)\n\n\t# debugging\n\tdef __str__(self):\n\t\treturn \"NODE %d: %s\" % (self.value, str(list(map(lambda c: c.value, self.neighbors))))\n\tdef __repr__(self):\n\t\treturn str(self)\n\ndef run():\n\tqueries = int(input().strip())\n\tfor _ in range(queries):\n\t\t[n,m] = list(map(int, input().split()))\n\t\tconnections = []\n\t\tfor i in range(m):\n\t\t\tconnections.append(tuple(map(int, input().split())))\n\t\tfriends = make_graph(connections, n)\n\t\tfriend_groups = [group for group in make_friend_groups(friends) if len(group) > 1]\n\t\ttotal = sum([group_total(group) for group in friend_groups])\n\t\tprint(total)\n\ndef make_graph(connections, n):\n\t# to allow connections to match indexing\n\tgraph = [Node(i) for i in range(n+1)]\n\tfor connection in connections:\n\t\tgraph[connection[0]].add_neighbor(graph[connection[1]])\n\treturn graph[1:]\n\ndef make_friend_groups(friends):\n\tfriend_groups = []\n\tfor friend in friends:\n\t\tif not friend.visited:\n\t\t\tfriend_groups.append(DFS(friend, save=True))\n\treturn friend_groups\n\n\n# to calculate max total for each group:\n\t# reset group (visited = 0 for all nodes)\n\t# total = num_people_in_group * (num_people_in_group - 1)\n\t# run DFS to find cycles\n\t# as soon as cycle is found:\n\t\t# total += num_people_in_group * (num_people_in_group - 1)\n\t\t\t# reasoning: for n people, at most n-1 friendships\n\t\t# delete edge causing cycle\n\t\t# continue DFS until all cyclic edges are removed\n\t\t\t# do not need to start over and reset graph\n\t\t\t# reasoning: removing one back edge will not affect existence of other back edges\n\t\t# when node with 1 child is found, add to leaves list\n\t\t\t# when deleting an edge, if either node has one child now, add to leaves list\n\t# once all cycles are removed:\n\t\t# take 
first item from leaves list\n\t\t# delete it\n\t\t# update leaves list (look at neighbor)\n\t\t# num_friends in group -= 1\n\t\t# total += num_friends_in_group * (num_friends_in_group - 1)\n\t\t# when num_friends_in_group == 1: break\n\t# return total\n\ndef group_total(group):\n\t# reset group\n\tfor node in group:\n\t\tnode.visited = False\n\tnum_friends = len(group)\n\t\n\t# to avoid accidentally adding a node twice\n\t# if this takes too long I will make this a list and check logic more carefully :)\n\t# DONT NEED JUST KIDDING\n\t# leaves = set()\n\ttotal = num_friends*(num_friends-1)\n\n\t# DFS TO REMOVE CYCLES\n\tstack = [group[0]]\n\twhile stack:\n\t\tnode = stack.pop()\n\t\t\n\t\t# could have been pushed to stack before it was visited\n\t\t# don't want to process twice :)\n\t\tif not node.visited:\n\t\t\tnode.visited = True\n\t\t\tfor neighbor in node.neighbors:\n\t\t\t\t# CYCLE -- update total and delete edge\n\t\t\t\tif neighbor.visited:\n\t\t\t\t\tnode.delete_neighbor(neighbor)\n\t\t\t\t\t# I DO NOT THINK I NEED THIS LOGIC\n\t\t\t\t\t# lets see if it works without\n\t\t\t\t\t# idea: when neighbor finishes, this edge will be gone and it's neighbors count will be 1\n\t\t\t\t\t# if len(node.neighbors) == 1:\n\t\t\t\t\t# \tleaves.add(node)\n\t\t\t\t\t# elif len(neighbor.neighbors) == 1:\n\t\t\t\t\t# \tleaves.add(neighbor)\n\t\t\t\t\ttotal += num_friends*(num_friends-1)\n\t\t\t\telse:\n\t\t\t\t\tstack.append(neighbor)\n\t\t\t# if len(node.neighbors) == 1:\n\t\t\t# \tleaves.add(node)\n\n\t# remove leaves one by one\n\t# don't want to operate on last leaf or we'll get negative friends\n\t# update total for each one\n\t# this is shameful:\n\t# try using it just as a list later\n\n\t# once all cycles are removed:\n\t\t# take first item from leaves list\n\t\t# delete it\n\t\t# update leaves list (look at neighbor)\n\t\t# num_friends in group -= 1\n\t\t# total += num_friends_in_group * (num_friends_in_group - 1)\n\t\t# when num_friends_in_group == 1: break\n\n\t# THIS COULD ACTUALLY BE CALCULATED AUTOMATICALLY -- break cycles\n\t# then do sum(n*n-1 for n in range)\n\t# leaves = list(leaves)\n\t# while len(leaves) > 1:\n\t# \tnode = leaves[0]\n\t# \tleaves.remove(node)\n\t# \t# DEBUGGING\n\t# \tassert len(node.neighbors) == 1\n\n\t# \tneighbor = node.neighbors[0]\n\t# \tnode.remove_neighbor(neighbor)\n\t# \tif len(neighbor.neighbors) == 1:\n\t# \t\tleaves.append(neighbor)\n\t# \tnum_friends -= 1\n\t# \ttotal += num_friends*(num_friends-1)\n\n\treturn total\n\ndef DFS(node, save=False):\n\tif save:\n\t\tnodes = []\n\tstack = [node]\n\twhile stack:\n\t\tnode = stack.pop()\n\t\tnode.visited = True\n\t\tif save:\n\t\t\tnodes.append(node)\n\n\t\tfor neighbor in node.neighbors:\n\t\t\tif not neighbor.visited:\n\t\t\t\tstack.append(neighbor)\n\tif save:\n\t\treturn nodes\n\nrun()\n\n# procedure:\n# for each query:\n\t# add all nodes to graph\n\t# split into groups, delete orphans\n\t\t# use DFS\n\t\t# add nodes to list that are discovered on a single call\n\t# calculate max total for each group\n\t# add totals together and print","sub_path":"HackerRank/Challenges/Week Of Code 28/WIP/total_friends.py","file_name":"total_friends.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"476355613","text":"from django.shortcuts import render\nfrom blog.models import ArticlePost, ArticleColumn\nfrom problemset.models import Problem\n\n\ndef home(request):\n new_article = ArticlePost.objects.all()[:3]\n 
new_problem = Problem.objects.all()[:3]\n news = ArticlePost.objects.filter(column__title=\"新闻\")[:3]\n print(news)\n context = {\n \"new_article\": new_article,\n \"new_problem\": new_problem,\n \"news\": news\n }\n return render(request, 'home/index.html', context)\n","sub_path":"dhuoj/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"75475891","text":"class Verbet (object):\n\n def __init__(self, word, definition):\n self.word = word\n self.definition = definition\n\nword = raw_input('Add a word: ')\ndefinition = raw_input('Add the definition:')\nverbet = Verbet(word, definition)\nf = open('test.txt', 'a')\nf.write(str(verbet.word + \" : \"))\nf.write(str(verbet.definition + \"\\n\"))\n\n","sub_path":"voca.py","file_name":"voca.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"223723309","text":"def fib(n):\n a, b = 0, 1\n while (a < n):\n print(a)\n a, b = b, a+b\n\ndef fib_gen(n):\n a, b = 0, 1\n while ( a < n ):\n yield a\n a, b = b, a + b\n\nif __name__ == \"__main__\":\n # fib(1000)\n print([x for x in fib_gen(1000)])\n\n","sub_path":"python/src/algo/fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"168830704","text":"\"\"\"Trains a TF-IDF model of all entries and returns a document term matrix.\n\nMultiword expressions are recognized;project specific stopwords eliminated\nall terms the df of which is below 5 removed; documents that are outliers\n(too few or too many resulting features after training) removed; importance\nof features are measured with a RandomForestClassifier and unimportant features\nremoved from the document term matrix.\n\"\"\"\n\nimport os\nimport re\nimport spacy\nimport json\nimport pdb\nimport sys\nimport pandas as pd\nfrom gensim.models.phrases import Phraser\nfrom gensim.models import TfidfModel\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import preprocessing\n\n\n# Add current path to python path\nsys.path.append(os.getcwd())\nfrom Utils import gensim_utils\nimport constants\n\n# Parse files in the input folder\ninput_directory = os.getcwd() + '/' + constants.INPUT_FOLDER + \\\n 'Anglo_Saxon_Chronicles/'\n\n# Set up the output directory\noutput_directory = os.getcwd() + '/' + constants.OUTPUT_FOLDER + \\\n 'Anglo_Saxon_Chronicles/'\n\n# Read the input file\ncomplete_text = open(input_directory + 'anglo_saxon_chronicle.txt').read()\n# Get each year (entries) from the chronicle\nentries = complete_text.split('\\n\\n')\n# Eliminate line breaks from each entry\nentries_without_line_break = [' '.join(element.split('\\n'))\n for element in entries]\n\n# Load the spacy model\nsp = spacy.load('en_core_web_sm')\n\n# Load project specific stopwords,blacklisted pos and tags\nwith open(output_directory +\n 'project_specific_stop_words_pos_tags.json') as json_file:\n project_specific_stop_words = json.load(json_file)\n\nstopwords = project_specific_stop_words['stopwords']\nblacklisted_pos = project_specific_stop_words['blacklisted_pos']\nblacklisted_tags = project_specific_stop_words['blacklisted_tags']\n\n# Load the phraser model trained in the previous step\nphraser_model = Phraser.load(output_directory + 'phraser_model')\n\n# Create a list that will hold the bag of word 
representation of each entry\nbows = []\n\n# Create a bag of word representation of each entry\nfor i, entry in enumerate(entries_without_line_break):\n # POS tag each entry\n entry_text = sp(entry)\n bow = []\n for sentence in entry_text.sents:\n sentence_element = []\n # Remove blacklisted elements\n for token in sentence:\n if ((token.pos_ not in blacklisted_pos) and\n (token.tag_ not in blacklisted_tags) and (token.lemma_.lower()\n not in stopwords)):\n if '-' in token.lemma_:\n sentence_element.append(token.text)\n else:\n sentence_element.append(token.lemma_)\n # Apply the phraser model to get multi word expressions,update bow\n bow.extend(phraser_model[sentence_element])\n bows.append(bow)\n\npreselected_features = pd.read_csv(output_directory +'historically_specific_vocab.csv')['0'].to_list()\n\n\n# Initialize a gensim dictionary with bows\ngensim_dic = gensim_utils.initialize_gensim_dictionary([preselected_features ])\n\n# Create a TF-IDF based bag of word representation of each entry\n\n\n# Build a gensim corpus\ngensim_corpus = [gensim_dic.doc2bow(text) for text in bows]\n\n# Train a tf-idf model and reuse the gensim dic\nid2word={key: value for (key, value) in enumerate(preselected_features)}\nmodel = TfidfModel(gensim_corpus, id2word=id2word,normalize=True)\n\n# Build a tf-idf corpus\ncorpus_tfidf = model[gensim_corpus]\n\n# Get the matrix representation (matrix_documents) of the TF-IDF corpus\nn_items = len(gensim_dic)\nds = []\nfor doc in corpus_tfidf:\n d = [0] * n_items\n for index, value in doc:\n d[index] = value\n ds.append(d)\nmatrix_documents = np.array(ds)\n\n# Remove entries that have many null values or too many values\nentries_for_remove = []\nfor element in enumerate(matrix_documents):\n if ((np.nonzero(element[1])[0].shape[0] < 5) or\n (np.nonzero(element[1])[0].shape[0] > 100)):\n entries_for_remove.append(element[0])\nmatrix_documents = np.delete(matrix_documents, entries_for_remove, 0)\n\n\n# Create a list from the features (features_list) so that they can be reused\nfeatures_list = [element[1] for element in gensim_dic.iteritems()]\n\n\n\nnp.savetxt(output_directory + 'document_term_matrix_with_preselected_vocab.txt', matrix_documents)\n\n# Create a new entry index by replacing line break with <br/>
 tag\n# Add line breaks to each entry\nentries = complete_text.split('\n\n')\nentries_without_line_break = ['<br/>
'.join(element.split('\n'))\n for element in entries]\n# Remove too long spaces\nentries_without_line_break = [re.sub(' +', ' ', element) for element\n in entries_without_line_break]\n\n# Create an empty index that will hold them\nindex = []\n\npdb.set_trace()\n\n# Remove those entries that were removed from document term matrix\nfor i in sorted(entries_for_remove, reverse=True):\n del entries_without_line_break[i]\n\n# Add the terms in bow of each entry\nfor i, entry in enumerate(entries_without_line_break):\n # Find the non zero element in each row of the document term matrix\n vocab = np.nonzero(matrix_documents[i])[0].tolist()\n # Add them to the index data\n for element in vocab:\n entry = entry + '<br/>
' + features_list[element]\n index.append(entry)\n\n# Save the updated index data\nwith open(output_directory + 'index_with_preselected_vocab.json', 'w') as file:\n file.write(json.dumps(index))\n\npdb.set_trace()\n","sub_path":"DataProcessing/AngloSaxonChronicle/Trash/train_tf_idf_model_with_manually_selected_vocab.py","file_name":"train_tf_idf_model_with_manually_selected_vocab.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"71644507","text":"# My first program\n# Date: 206/04/17\n# Author: Aditya Sharma\n\n''' test\n'''\n\nvar_1 = input('Enter 1st variable: ')\nvar_2 = input('Enter 2nd variable: ')\nvar_3 = input('Enter 3rd variable: ')\n\nmax_var = max(var_1, var_2, var_3)\n\nprint (\"Max Number is\", max_var)\n","sub_path":"max_of_3_numbers.py","file_name":"max_of_3_numbers.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"68253692","text":"import sqlite3\nimport datetime\n\ndef addDeveloper(id, name, joiningDate):\n try:\n sqliteConnection = sqlite3.connect('SQLite_Python.db',\n detect_types=sqlite3.PARSE_DECLTYPES |\n sqlite3.PARSE_COLNAMES)\n cursor = sqliteConnection.cursor()\n print(\"Connected to SQLite\")\n\n sqlite_create_table_query = '''CREATE TABLE new_developers (\n id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n joiningDate timestamp);'''\n\n cursor = sqliteConnection.cursor()\n cursor.execute(sqlite_create_table_query)\n\n # insert developer detail\n sqlite_insert_with_param = \"\"\"INSERT INTO 'new_developers'\n ('id', 'name', 'joiningDate') \n VALUES (?, ?, ?);\"\"\"\n\n data_tuple = (id, name, joiningDate)\n cursor.execute(sqlite_insert_with_param, data_tuple)\n sqliteConnection.commit()\n print(\"Developer added successfully \\n\")\n\n # get developer detail\n sqlite_select_query = \"\"\"SELECT name, joiningDate from new_developers where id = ?\"\"\"\n cursor.execute(sqlite_select_query, (1,))\n records = cursor.fetchall()\n\n for row in records:\n developer= row[0]\n joining_Date = row[1]\n print(developer, \" joined on\", joiningDate)\n print(\"joining date type is\", type(joining_Date))\n\n cursor.close()\n\n except sqlite3.Error as error:\n print(\"Error while working with SQLite\", error)\n finally:\n if (sqliteConnection):\n sqliteConnection.close()\n print(\"sqlite connection is closed\")\n\naddDeveloper(1, 'Mark', datetime.datetime.now())\n\n\n# Output:\n#\n# Connected to SQLite\n# Developer added successfully\n#\n# Mark joined on 2019-06-28 20:57:32.352790\n# joining date type is \n# sqlite connection is closed","sub_path":"100_databases/001_sqlite/examples/PYnative/023_read datetime.py","file_name":"023_read datetime.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"537622018","text":"print('I am 6\\'2\" tall.')\r\ntabby_cat = \"\\tI'm tabbed in.\"\r\npersian_cat = \"I'm split\\non a line.\"\r\nbackslash_cat = \"I'm \\\\a\\\\ cat.\"\r\n\r\nfat_cat = '''\r\nI'll do a list:\r\n\\v* Cat food\r\n\\v* Fishies\r\n\\v* Catnip\\n\\v* Grass'''\r\n\r\nprint(tabby_cat)\r\nprint(persian_cat)\r\nprint(backslash_cat)\r\nprint(fat_cat)\r\n\r\n","sub_path":"ex10.py","file_name":"ex10.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"226870672","text":"#!/usr/bin/env 
python\n\nr'''\njust prints sh commands to mv URI-not-safe names.\nnames not necessarily are existing paths.\n'''\n\nimport pythonpath; pythonpath\nfrom x19290 import args_or_example, just_print, ArgsOrExampleParser\n\nfrom subprocess import list2cmdline\nfrom sys import argv\nfrom urllib import quote\n\n# quoted 'a' is 'a'. so 'a' is not `mv`ed.\n# quoted 'b c%' is 'b%20c%25' so 'b c%' is `mv`ed to 'b%20c%25'.\n_EXAMPLE = 'a', r'b c%'\n\ndef app(argv):\n parsed = args_or_example(argv, _EXAMPLE, _ArgumentParser)\n just_print(main(parsed.args))\n\nclass _ArgumentParser(ArgsOrExampleParser):\n def __init__(self):\n super(_ArgumentParser, self).__init__()\n self.add_argument(r'args', nargs='*', metavar=r'ARG')\n\ndef main(args):\n for arg in args:\n qarg = quote(arg)\n if qarg == arg:\n continue\n command_list = r'mv', arg, quote(arg)\n yield list2cmdline(command_list)\n\nif __name__ == r'__main__':\n app(argv)\n","sub_path":"urllibquotetest.py","file_name":"urllibquotetest.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"74901338","text":"#import os\r\nimport json\r\n\r\npost_f = \"Posts.xml\"\r\noutput_question_path = \"questionsCodeOrigin/\"\r\noutput_answer_path = \"answersCode/\"\r\noutput_other_answer_path = \"answersCode_highscore/\"\r\n\r\ndict_question_answer = {}\r\ndict_question_answer_highscore = {}\r\n\r\n## Ordered by creation time\r\nwith open(post_f, encoding='utf-8') as f:\r\n for line in f:\r\n line = line.strip()\r\n if not line.startswith(\" 50):\r\n with open(output_other_answer_path+\"post_\"+post_id+\".xml\",\"w\", encoding='utf-8') as outf:\r\n outf.write(line)\r\n dict_question_answer_highscore[question_id] = post_id\r\n else:\r\n continue\r\n with open(output_answer_path+\"post_\"+post_id+\".xml\",\"w\", encoding='utf-8') as outf:\r\n outf.write(line)\r\n \r\n\r\nwith open(\"QAmappingCodeQ.json\",\"w\") as metaf: \r\n metaf.write(json.dumps(dict_question_answer))\r\n\r\nwith open(\"QAmappingCodeQ_highscore.json\",\"w\") as metaf: \r\n metaf.write(json.dumps(dict_question_answer_highscore))\r\n\r\n","sub_path":"src/filterCodeQuestion.py","file_name":"filterCodeQuestion.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"336787598","text":"import numpy as np\nimport os\nimport re\nimport random\nimport math\nimport pandas as pd\nimport utils\n\nclass Reader():\n def __init__(self, config):\n \n self.imgnames = list()\n self.labels = pd.read_csv(config.labels_file, skiprows=[0], usecols=config.usecols, header=None)\n self.labels = np.float32(self.labels.values)\n\n self.imgnames = pd.read_csv(config.labels_file, skiprows=[0], usecols=[0], header=None).values\n\n self.size = len(self.imgnames)\n self.batch_size = config.batch_size\n self.imgs_path = config.imgs_path\n self.lineidx = 0\n self.sample_num = len(self.imgnames)\n self.lable_tpyes_num = len(config.usecols)\n self.current_sample_index = 0\n self.current_sample = list()\n for i in xrange(self.lable_tpyes_num):\n self.current_sample.append([index for index, value in enumerate(np.transpose(self.labels)[i]) if value == 0])\n\n # this method can return the index of every type of sample one by one when fetch random batch\n def get_one_random_balance_index(self):\n rand_index = random.sample(self.current_sample[self.current_sample_index], 1)\n self.current_sample_index = (self.current_sample_index + 1) % 
self.lable_tpyes_num \n return rand_index\n \n def random_batch(self):\n rand = list()\n for i in xrange(self.batch_size):\n rand.append(self.get_one_random_balance_index()[0])\n batch_imgnames = list()\n for idx in rand:\n batch_imgnames.append(self.imgnames[idx])\n batch_labels = self.labels[rand]\n\n img_list = list()\n for imgname in batch_imgnames:\n path = self.imgs_path + imgname[0] +\".jpg\"\n img = utils.load_image(path)\n img_list.append(img)\n\n batch_imgs = np.reshape(np.stack(img_list), [-1,224,224,3])\n batch_labels = np.reshape(batch_labels, [-1, self.labels.shape[1]])\n return batch_imgs, batch_labels\n\n def batch(self):\n batch_imgnames = list()\n lineidx_upper = self.lineidx + self.batch_size\n if lineidx_upper > self.sample_num:\n lineidx_upper = self.sample_num\n for idx in range(self.lineidx, lineidx_upper):\n batch_imgnames.append(self.imgnames[idx])\n batch_labels = self.labels[self.lineidx:lineidx_upper]\n self.lineidx = lineidx_upper\n if self.lineidx >= self.sample_num:\n self.lineidx = 0\n\n img_list = list()\n for imgname in batch_imgnames:\n path = self.imgs_path + imgname[0] + \".jpg\"\n img = utils.load_image(path)\n img_list.append(img)\n \n batch_imgs = np.reshape(np.stack(img_list), [-1,224,224,3])\n batch_labels = np.reshape(batch_labels, [-1, self.labels.shape[1]])\n return batch_imgs, batch_labels\n def read_one(self):\n if len(self.imgnames[self.lineidx]) > 11:\n if self.imgnames[self.lineidx][12] == '-':\n self.lineidx = self.lineidx+1\n return [],[],True\n else:\n img_name = self.imgnames[self.lineidx][:11]\n else:\n img_name = self.imgnames[self.lineidx]\n label = self.labels[self.lineidx]\n self.lineidx = self.lineidx+1\n\n path = self.imgs_path+img_name+\".jpg\"\n img = utils.load_image(path)\n #print batch_labels\n #print img_list\n img = np.reshape(img, [-1, 224, 224, 3])\n label = np.reshape(label, [-1, self.labels.shape[1]])\n return img, label, False\n\n","sub_path":"vgg/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"291548493","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nfrom scaling_features import *\ndef rmse_cv(model):\n rmse= np.sqrt(-cross_val_score(model, X_train, y, scoring=\"neg_mean_squared_error\", cv = 5))\n return(rmse)\n#LASSO MODEL\nclf1 = LassoCV(alphas = [1, 0.1, 0.001, 0.0005, 5e-4])\nclf1.fit(X_train, y)\nlasso_preds = np.expm1(clf1.predict(X_test))\n#ELASTIC NET\nclf2 = ElasticNet(alpha=0.00005, l1_ratio=0.9)\nclf2.fit(X_train, y)\nelas_preds = np.expm1(clf2.predict(X_test))\n#XGBOOST\nclf3=xgb.XGBRegressor(colsample_bytree=0.4,\n gamma=0.45,\n learning_rate=0.07,\n max_depth=20,\n min_child_weight=1.5,\n n_estimators=500,\n reg_alpha=0.45,\n reg_lambda=0.45,\n subsample=0.95)\nclf3.fit(X_train, y)\nxgb_preds = np.expm1(clf3.predict(X_test))\nfinal_result = 0.45*lasso_preds + 0.25*xgb_preds+0.30*elas_preds\nsolution = pd.DataFrame({\"id\":test.Id, \"SalePrice\":final_result}, columns=['id', 'SalePrice'])\nsolution.to_csv(\"result.csv\", index = False)\n","sub_path":"Library/final_prediction.py","file_name":"final_prediction.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"191389955","text":"# Copyright (c) 2021 Xiaomi Corporation (author: Fangjun Kuang)\n\nimport glob\nimport os\nimport platform\nimport shutil\nimport sys\nfrom pathlib import Path\n\nimport 
setuptools\nfrom setuptools.command.build_ext import build_ext\n\n\ndef is_for_pypi():\n ans = os.environ.get(\"KALDIFEAT_IS_FOR_PYPI\", None)\n return ans is not None\n\n\ndef is_macos():\n return platform.system() == \"Darwin\"\n\n\ntry:\n from wheel.bdist_wheel import bdist_wheel as _bdist_wheel\n\n class bdist_wheel(_bdist_wheel):\n def finalize_options(self):\n _bdist_wheel.finalize_options(self)\n # In this case, the generated wheel has a name in the form\n # kaldifeat-xxx-pyxx-none-any.whl\n if is_for_pypi() and not is_macos():\n self.root_is_pure = True\n else:\n # The generated wheel has a name ending with\n # -linux_x86_64.whl\n self.root_is_pure = False\n\n\nexcept ImportError:\n bdist_wheel = None\n\n\ndef cmake_extension(name, *args, **kwargs) -> setuptools.Extension:\n kwargs[\"language\"] = \"c++\"\n sources = []\n return setuptools.Extension(name, sources, *args, **kwargs)\n\n\nclass BuildExtension(build_ext):\n def build_extension(self, ext: setuptools.extension.Extension):\n # build/temp.linux-x86_64-3.8\n os.makedirs(self.build_temp, exist_ok=True)\n\n # build/lib.linux-x86_64-3.8\n os.makedirs(self.build_lib, exist_ok=True)\n\n kaldifeat_dir = Path(__file__).parent.parent.resolve()\n\n cmake_args = os.environ.get(\"KALDIFEAT_CMAKE_ARGS\", \"\")\n make_args = os.environ.get(\"KALDIFEAT_MAKE_ARGS\", \"\")\n system_make_args = os.environ.get(\"MAKEFLAGS\", \"\")\n\n if cmake_args == \"\":\n cmake_args = \"-DCMAKE_BUILD_TYPE=Release\"\n\n if make_args == \"\" and system_make_args == \"\":\n print(\"For fast compilation, run:\")\n print('export KALDIFEAT_MAKE_ARGS=\"-j\"; python setup.py install')\n\n if \"PYTHON_EXECUTABLE\" not in cmake_args:\n print(f\"Setting PYTHON_EXECUTABLE to {sys.executable}\")\n cmake_args += f\" -DPYTHON_EXECUTABLE={sys.executable}\"\n\n build_cmd = f\"\"\"\n cd {self.build_temp}\n\n cmake {cmake_args} {kaldifeat_dir}\n\n\n make {make_args} _kaldifeat\n \"\"\"\n print(f\"build command is:\\n{build_cmd}\")\n\n ret = os.system(build_cmd)\n if ret != 0:\n raise Exception(\n \"\\nBuild kaldifeat failed. Please check the error message.\\n\"\n \"You can ask for help by creating an issue on GitHub.\\n\"\n \"\\nClick:\\n\\thttps://github.com/csukuangfj/kaldifeat/issues/new\\n\" # noqa\n )\n\n lib_so = glob.glob(f\"{self.build_temp}/lib/*kaldifeat*.so\")\n lib_so += glob.glob(f\"{self.build_temp}/lib/*kaldifeat*.dylib\") # macOS\n for so in lib_so:\n print(f\"Copying {so} to {self.build_lib}/\")\n shutil.copy(f\"{so}\", f\"{self.build_lib}/\")\n","sub_path":"cmake/cmake_extension.py","file_name":"cmake_extension.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"255714878","text":"\"\"\"\n# Startup script\n\nPython script to set certain states at HA start and notify.\nThis unifies various automations and HA scripts in a simpler one.\n\n\"\"\"\n\n# Create motioneye binary_sensors\ncameras = {\n \"binary_sensor.motioncam_pizero\": \"Vídeo-Mov. en PIzero\",\n \"binary_sensor.motioncam_pizero2\": \"Vídeo-Mov. en PIW2\",\n # 'binary_sensor.motioncam_office': \"Vídeo-Mov. 
en Office\",\n}\nfor bs, fn in cameras.items():\n hass.states.set(\n bs, \"off\", attributes={\"friendly_name\": fn, \"device_class\": \"motion\"}\n )\n\n# Notify HA init with iOS\nhass.services.call(\n \"notify\",\n \"mobile_app_iphone\",\n {\n \"title\": \"Home-assistant started\",\n \"message\": \"Hassio is now ready for you\",\n \"data\": {\n \"push\": {\n \"badge\": 5,\n \"sound\": \"US-EN-Morgan-Freeman-Welcome-Home.wav\",\n \"category\": \"CONFIRM\",\n }\n },\n },\n)\n","sub_path":"config/python_scripts/start_states.py","file_name":"start_states.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"346682839","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 14 09:55:03 2019\r\n随机密码--密码生成部分\r\n@author: yiran\r\n\"\"\" \r\n\r\n\r\nimport random\r\n\r\nclass G:#全局变量放到这里进行处理\r\n _a=0;\r\n _b=[]\r\n _zifuji=[]\r\n _checkored=\"\"\r\n _wangzhi=[]\r\n _user=[]\r\n \r\n\r\nclass Kongzhi(G):\r\n\r\n def shuru(self):#__init__函数里边的东西在实例化后自动执行\r\n while True:\r\n G._a=input(\"请输入密码位数:\")\r\n try: \r\n int(G._a) \r\n break\r\n except ValueError: \r\n print(\"输入的不是数字!\")\r\n \r\n while True:\r\n G._b=input(\"请输入密码种类:\\n1.小写字母\\n2.大写字母\\n3.数字\\n4.任意字符\\n5.默认字符\\n\")#列表化,把输入的数字转换陈成列表 ??如何让列表中有二位数或多位数\r\n \r\n try: \r\n int(G._b) \r\n if 0', '?', '@', '[', '\\\\', ']', '^', '_','`', '{', '|', '}', '~'] \r\n for i in range(int(a)):\r\n index = int(random.choice(b))\r\n if index == 1 :\r\n _checkored += chr(random.randint(97,122))#小写字母\r\n elif index == 2 :\r\n _checkored += chr(random.randint(65,90))#大写字母\r\n elif index == 3:\r\n _checkored += chr(random.randint(48,57))#数字\r\n elif index ==4 :\r\n _checkored += random.choice(zifuji)\r\n else:\r\n _checkored += random.choice(list)\r\n print(\"密码是:\",_checkored)\r\n G._checkored= _checkored\r\n\r\n\r\n\r\n\r\ng= G()\r\nkongzhi = Kongzhi()\r\nkongzhi.shuru()\r\nkongzhi.shengcheng(g._a,g._b,g._zifuji,G._checkored)\r\n \r\n","sub_path":"MakePassword.py","file_name":"MakePassword.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"140686103","text":"from django.db.models import Count,Avg,Sum,F\nfrom django.db.models.functions import Coalesce\nfrom decimal import Decimal\nfrom django.db import models\nfrom cawreport.models import Cawreport,Item,Customitem,Personalcharge\nfrom project.models import Project,Servicecost\nfrom price.models import Price\nfrom invoice.models import Invoice\n\n\nclass CalculateCawTotal:\n def sum_per_row(self, project):\n sum_row = Cawreport.objects.all().filter(project_id=project) \\\n .annotate(\n total_price=Sum(\n #calculate material cost, each item\n (Coalesce (F('item__price') *\n F('item__quantity') *\n ((F('item__suplement_charges')*Decimal('0.01'))+1)\n ,0\n )\n ),output_field=models.FloatField()\n )\n ).annotate(\n total_price2=Sum(\n #calculate material cost, each item\n (Coalesce (F('customitem__price') *\n F('customitem__quantity') *\n ((F('customitem__suplement_charges')*Decimal('0.01'))+1)\n ,0\n )\n ),output_field=models.FloatField()\n )\n ).annotate(\n total_price3=Sum(\n #calculate material cost, each item\n (Coalesce (F('personalcharge__price') *\n F('personalcharge__hours_worked') *\n ((F('personalcharge__suplement_charges')*Decimal('0.01'))+1)\n ,0\n )\n ),output_field=models.FloatField()\n )\n )\n return sum_row\n\n def sum_caw_row(self,project):\n sum = list(zip(self.customitems_total_caw(project), 
self.items_total_caw(project)))\n return sum\n\n def sum_row_sql(self,project):\n sumtest = Cawreport.objects.raw('SELECT * FROM aveny_customitems WHERE caw_id IN \\\n (SELECT caw_id FROM aveny_cawreports WHERE %s=project_id)', [project])\n for i in sumtest:\n print(i.price*i.quantity*(Decimal(i.suplement_charges)*Decimal(0.01)+1))\n return sumtest\n\n def customitems_total_caw(self,project):\n sum2 = Cawreport.objects.all().filter(project_id=project).annotate(\n total_price2=Sum(\n #calculate material cost, each item\n (Coalesce (F('customitem__price') *\n F('customitem__quantity') *\n ((F('customitem__suplement_charges')*Decimal('0.01'))+1)\n ,0\n )\n ),output_field=models.FloatField()\n )\n )\n\n return sum2\n\n def count_items_caw(self,cawreport):\n sum1 = Cawreport.objects.all().filter(caw_id=cawreport).count()\n return sum1\n\n def items_total_caw(self,project):\n sum1 = Cawreport.objects.all().filter(project_id=project).annotate(\n total_price=Sum(\n #calculate material cost, each item\n (Coalesce (F('item__price') *\n F('item__quantity') *\n ((F('item__suplement_charges')*Decimal('0.01'))+1)\n ,0\n )\n ),output_field=models.FloatField()\n )\n )\n return sum1\n\n def personalcharge_total_caw(self,project):\n sum3 = Cawreport.objects.all().filter(project_id=project).annotate(\n total_price3=Sum(\n #calculate material cost, each item\n (Coalesce (F('personalcharge__price') *\n F('personalcharge__hours_worked') *\n ((F('personalcharge__suplement_charges')*Decimal('0.01'))+1)\n ,0\n )\n ),output_field=models.FloatField()\n )\n )\n return sum3\n\n def invoice_total_caw(self,project):\n sum4 = Cawreport.objects.all().filter(project_id=project).annotate(\n total_price4=Coalesce(Sum('invoice__invoiced_amount'), 0)\n )\n return sum4\n\n def invoice_total_project(self, project):\n sum = Cawreport.objects.all().filter(project_id=project).aggregate(\n sum_invoice=Coalesce(\n Sum(('invoice__invoiced_amount'), output_field=models.FloatField()), 0\n ) \n )['sum_invoice']\n return sum\n\n\n def accepted_total_project(self, project):\n sum = Cawreport.objects.all().filter(project_id=project).aggregate(\n sum_accepted_cost=Sum(\n ('accepted_cost'),\n output_field=models.FloatField()\n )\n )['sum_accepted_cost']\n return sum\n\n\n def project_total(self, project):\n #get sum of material for project\n material = self.sum_project_material(project)\n #if material gets None as value, set to 0\n m = material if material else 0\n\n customitems = self.sum_project_customitems(project)\n c = customitems if customitems else 0\n\n personalcharge = self.sum_project_personalcharge(project)\n p = personalcharge if personalcharge else 0\n\n sum_total = m + c + p\n return sum_total\n\n\n\n def sum_project_material(self, project):\n sum = Cawreport.objects.all().filter(project_id=project).aggregate(\n total_spent=Sum(\n #total material RSK\n (\n Coalesce(\n F('item__price') *\n F('item__quantity') *\n ((F('item__suplement_charges')*Decimal('0.01'))+1)\n ,0)\n )\n ,\n output_field=models.FloatField()\n )\n )['total_spent']\n return sum\n\n def sum_project_customitems(self, project):\n sum = Cawreport.objects.all().filter(project_id=project).aggregate(\n total_spent=Sum(\n #total material RSK\n (\n Coalesce(\n F('customitem__price') *\n F('customitem__quantity') *\n ((F('customitem__suplement_charges')*Decimal('0.01'))+1)\n ,0)\n )\n ,\n output_field=models.FloatField()\n )\n )['total_spent']\n return sum\n\n def sum_project_personalcharge(self, project):\n sum = 
Cawreport.objects.all().filter(project_id=project).aggregate(\n total_spent=Sum(\n #total material RSK\n (\n Coalesce(\n F('personalcharge__price') *\n F('personalcharge__hours_worked') *\n ((F('personalcharge__suplement_charges')*Decimal('0.01'))+1)\n ,0)\n )\n ,\n output_field=models.FloatField()\n )\n )['total_spent']\n return sum\n\n\n\n def sum_caw_material(self, cawreport):\n sum = Cawreport.objects.all().filter(caw_id=cawreport).aggregate(\n total_spent=Sum(\n #total material RSK\n (\n Coalesce(\n F('item__price') *\n F('item__quantity') *\n ((F('item__suplement_charges')*Decimal('0.01'))+1)\n ,0)\n )\n ,\n output_field=models.FloatField()\n )\n )['total_spent']\n return sum\n\n def sum_caw_customitems(self, cawreport):\n sum = Cawreport.objects.all().filter(caw_id=cawreport).aggregate(\n total_spent=Sum(\n #total material customitems\n (\n Coalesce(\n F('customitem__price') *\n F('customitem__quantity') *\n ((F('customitem__suplement_charges')*Decimal('0.01'))+1)\n ,0)\n )\n ,\n output_field=models.FloatField()\n )\n )['total_spent']\n return sum\n\n def sum_caw_personalcharge(self, cawreport):\n sum = Cawreport.objects.all().filter(caw_id=cawreport).aggregate(\n total_spent=Sum(\n #total material personalcharge\n (\n Coalesce(\n F('personalcharge__price') *\n F('personalcharge__hours_worked') *\n ((F('personalcharge__suplement_charges')*Decimal('0.01'))+1)\n ,0)\n )\n ,\n output_field=models.FloatField()\n )\n )['total_spent']\n return sum\n\n\n def sum_caw_total(self, caw):\n material = self.sum_caw_material(caw)\n customitems = self.sum_caw_customitems(caw)\n personalcharge = self.sum_caw_personalcharge(caw)\n sum_total = material + customitems + personalcharge\n return sum_total\n\n\n def material(self, caw):\n material = Item.objects.all().filter(caw_id=caw)\n return material\n\n def personnel(self, caw):\n personnel = Personalcharge.objects.all().filter(caw_id=caw)\n return personnel\n\n def customitems(self, caw):\n customitems = Customitem.objects.all().filter(caw_id=caw)\n return customitems\n\n def caw_information(self, caw):\n info = Cawreport.objects.all().filter(caw_id=caw)\n return info\n","sub_path":"calculations/calculations.py","file_name":"calculations.py","file_ext":"py","file_size_in_byte":10942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"561618973","text":"#!/usr/bin/python3\n\"\"\"routes status of app\"\"\"\nfrom api.v1.views import app_views\nfrom flask import Flask, render_template, jsonify\nimport models\nfrom models import storage\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app_views.route('/status')\ndef status():\n \"\"\"returns status in json format\"\"\"\n return jsonify(status=\"OK\")\n\n\n@app_views.route('/stats', methods=['GET'])\ndef count_objs():\n \"\"\"retrieves number of objects by type\"\"\"\n json_cls = ['amenities', 'cities', 'places', 'reviews', 'states', 'users']\n\n # manually find the classes w/ this list, however, method below is better\n # cls = ['Amentity', 'City', 'Place', 'Review', 'State', 'User']\n\n class_models = sorted(models.CNC)\n class_models.remove('BaseModel')\n\n cls_count = [storage.count(i) for i in class_models]\n obj_dict = {k: v for k, v in zip(json_cls, cls_count)}\n return jsonify(obj_dict)\n","sub_path":"api/v1/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"295649516","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nimport urllib2\n\nclass Company(object):\n\tdef __init__(self, url):\n\t\tself.url = url\n\t\tself.company_list = []\n\t\tself.rules = []\n\tdef crawl(self):\n\t\tsoup = BeautifulSoup(urllib2.urlopen(self.url).read())\n\t\ttable = soup.find_all(\"table\", class_=\"wikitable sortable\")\n\t\tif not table:\n\t\t\treturn\n\t\tfor row in table[0].find_all(\"tr\")[1:]:\n\t\t\ttds = [td for td in row.find_all(\"td\")]\n\t\t\tsymbol = tds[0].a.text\n\t\t\tname = tds[1].text\n\t\t\tgics_sec = tds[2].text\n\t\t\tgics_sub_sec = tds[3].text\n\t\t\tself.company_list.append([symbol, name, gics_sec, gics_sub_sec])\n\n\tdef get_list(self):\n\t\treturn self.company_list\t\n\n\tdef get_rules(self):\n\t\treturn self.rules\n\n\tdef create_rules(self, fined_list_file):\n\t\twith open(fined_list_file) as flf:\n\t\t\tfor line in flf:\n\t\t\t\tinfos = line.strip().split(\"|\")\n\t\t\t\tname = infos[0]\n\t\t\t\trule = \"'%s' OR $%s\" % (infos[1].strip(), name)\n\t\t\t\tself.rules.append(\"%s|%s\" % (name, rule))\n\n\t\t\ndef main():\n\turl = \"http://en.wikipedia.org/wiki/List_of_S%26P_400_companies\"\n\tcompany = Company(url)\n\tcompany.crawl()\n\tcompany_list = company.get_list()\n\tout_file = \"/home/weiwang/workspace/data/sp_400_list.txt\"\n\twith open(out_file, \"w\") as ow:\n\t\tfor com in company_list:\n\t\t\tow.write(\"|\".join(com) + \"\\n\")\n\n\tfined_list_file = \"/home/weiwang/workspace/data/fined_sp_400_list.txt\"\n\tcompany.create_rules(fined_list_file)\n\trules = company.get_rules()\n\trule_file = \"/home/weiwang/workspace/data/company_rules.txt\"\n\twith open(rule_file, \"w\") as rw:\n\t\tfor rule in rules:\n\t\t\trw.write(rule + \"\\n\")\n\t\t\t\n\nif __name__ == \"__main__\":\n\tmain()\t\t\t\n","sub_path":"code/traffic/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"539475096","text":"def get_image_parameters(\n particle_center_x_list=lambda: [0, ],\n particle_center_y_list=lambda: [0, ],\n particle_radius_list=lambda: [3, ],\n particle_bessel_orders_list=lambda: [[1, ], ],\n particle_intensities_list=lambda: [[.5, ], ],\n image_size=lambda: 128,\n image_background_level=lambda: .5,\n signal_to_noise_ratio=lambda: 30,\n gradient_intensity=lambda: .2,\n gradient_direction=lambda: 0,\n ellipsoidal_orientation=lambda: [0, ],\n ellipticity=lambda: 1):\n \"\"\"Get image parameters.\n \n Inputs:\n particle_center_x_list: x-centers of the particles [px, list of real numbers]\n particle_center_y_list: y-centers of the particles [px, list of real numbers]\n particle_radius_list: radii of the particles [px, list of real numbers]\n particle_bessel_orders_list: Bessel orders of the particles\n particle_intensities_list: intensities of the particles [list of real numbers, normalized to 1]\n image_size: size of the image in pixels [px, positive integer]\n image_background_level: background level [real number normalized to 1]\n signal_to_noise_ratio: signal to noise ratio [positive real number]\n gradient_intensity: gradient intensity [real number normalized to 1]\n gradient_direction: gradient angle [rad, real number]\n ellipsoidal_orientation: Orientation of elliptical particles [rad, real number] \n ellipticity: shape of the particles, from spherical to elliptical [real number]\n \n Note: particle_center_x, particle_center_x, particle_radius, \n particle_bessel_order, 
particle_intensity, ellipsoidal_orientation must have the same length.\n \n Output:\n image_parameters: list with the values of the image parameters in a dictionary:\n image_parameters['Particle Center X List']\n image_parameters['Particle Center Y List']\n image_parameters['Particle Radius List']\n image_parameters['Particle Bessel Orders List']\n image_parameters['Particle Intensities List']\n image_parameters['Image Size']\n image_parameters['Image Background Level']\n image_parameters['Signal to Noise Ratio']\n image_parameters['Gradient Intensity']\n image_parameters['Gradient Direction']\n image_parameters['Ellipsoid Orientation']\n image_parameters['Ellipticity']\n \"\"\"\n\n image_parameters = {}\n image_parameters['Particle Center X List'] = particle_center_x_list()\n image_parameters['Particle Center Y List'] = particle_center_y_list()\n image_parameters['Particle Radius List'] = particle_radius_list()\n image_parameters['Particle Bessel Orders List'] = particle_bessel_orders_list()\n image_parameters['Particle Intensities List'] = particle_intensities_list()\n image_parameters['Image Size'] = image_size()\n image_parameters['Image Background Level'] = image_background_level()\n image_parameters['Signal to Noise Ratio'] = signal_to_noise_ratio()\n image_parameters['Gradient Intensity'] = gradient_intensity()\n image_parameters['Gradient Direction'] = gradient_direction()\n image_parameters['Ellipsoid Orientation'] = ellipsoidal_orientation()\n image_parameters['Ellipticity'] = ellipticity()\n\n return image_parameters\n\n\ndef get_image_parameters_preconfig(image_size=256):\n from numpy.random import uniform, randint\n from numpy import ones\n from math import pi\n\n particle_number = randint(10, 30)\n particle_radius_list = uniform(0.5, 2, particle_number)\n (particle_center_x_list, particle_center_y_list) = get_particle_positions(particle_radius_list, image_size)\n\n particle_bessel_orders_list = []\n particle_intensities_list = []\n\n for i in range(particle_number):\n particle_bessel_orders_list.append([1, ])\n particle_intensities_list.append([uniform(0.05, 0.2, 1), ])\n\n image_parameters = get_image_parameters(\n particle_center_x_list=lambda: particle_center_x_list,\n particle_center_y_list=lambda: particle_center_y_list,\n particle_radius_list=lambda: particle_radius_list,\n particle_bessel_orders_list=lambda: particle_bessel_orders_list,\n particle_intensities_list=lambda: particle_intensities_list,\n image_size=lambda: image_size,\n image_background_level=lambda: uniform(.3, .5),\n signal_to_noise_ratio=lambda: 50,\n gradient_intensity=lambda: uniform(0, 0.1),\n gradient_direction=lambda: uniform(-pi, pi),\n ellipsoidal_orientation=lambda: uniform(-pi, pi, particle_number),\n ellipticity=lambda: 1)\n\n return image_parameters\n\n\ndef get_aug_parameters():\n return dict(rotation_range=0.2,\n width_shift_range=0.05,\n height_shift_range=0.05,\n shear_range=0.05,\n zoom_range=0.05,\n horizontal_flip=True,\n fill_mode='nearest')\n\n\ndef get_particle_positions(particle_radius_list=[], image_size=128):\n \"\"\"Generates multiple particle x- and y-coordinates with respect to each other.\n\n Inputs: \n particle_radius_list: radii of the particles to place [px, list of real numbers]\n image_size: side length of the square image the particles must fit inside [px, positive integer]\n \n Output:\n particle_centers_x: list of x-coordinates for the particles\n particle_centers_y: list of y-coordinates for 
the particles\n \"\"\"\n\n from numpy.random import uniform\n from numpy import reshape\n\n particle_centers = []\n for radius in particle_radius_list:\n # print('X is: ' + str(x) + \". Y is: \" + str(y) + \". Radius is: \" + str(radius) + \". Image size is: \" + str(image_size) + '.')\n for i in range(100):\n (x, y) = (uniform(radius, image_size - radius), uniform(radius, image_size - radius))\n if all(((x - coord[0]) ** 2 + (y - coord[1]) ** 2) ** 0.5 > radius for coord in particle_centers):\n particle_centers.append([x, y])\n break\n elif i == 99:\n raise Exception(\"Couldn't place out another particle after 100 tries\")\n particle_centers_x = []\n particle_centers_y = []\n for coordinates in particle_centers:\n particle_centers_x.append(coordinates[0])\n particle_centers_y.append(coordinates[1])\n\n return (particle_centers_x, particle_centers_y)\n\n\ndef get_image(image_parameters):\n \"\"\"Generate image with particles.\n Input:\n image_parameters: list with the values of the image parameters in a dictionary:\n image_parameters['Particle Center X List']\n image_parameters['Particle Center Y List']\n image_parameters['Particle Radius List']\n image_parameters['Particle Bessel Orders List']\n image_parameters['Particle Intensities List']\n image_parameters['Image Size']\n image_parameters['Image Background Level']\n image_parameters['Signal to Noise Ratio']\n image_parameters['Gradient Intensity']\n image_parameters['Gradient Direction']\n image_parameters['Ellipsoid Orientation']\n image_parameters['Ellipticity']\n \n Note: image_parameters is typically obained from the function get_image_parameters()\n \n Output:\n image: image of the particle [2D numpy array of real numbers betwen 0 and 1]\n \"\"\"\n\n from numpy import meshgrid, arange, ones, zeros, sin, cos, sqrt, clip, array, ceil, mean, amax, asarray, amin\n from numpy.random import normal, poisson\n from math import e\n from scipy.special import jv as bessel\n import warnings\n\n particle_center_x_list = image_parameters['Particle Center X List']\n particle_center_y_list = image_parameters['Particle Center Y List']\n particle_radius_list = image_parameters['Particle Radius List']\n particle_bessel_orders_list = image_parameters['Particle Bessel Orders List']\n particle_intensities_list = image_parameters['Particle Intensities List']\n image_size = image_parameters['Image Size']\n image_background_level = image_parameters['Image Background Level']\n signal_to_noise_ratio = image_parameters['Signal to Noise Ratio']\n gradient_intensity = image_parameters['Gradient Intensity']\n gradient_direction = image_parameters['Gradient Direction']\n ellipsoidal_orientation_list = image_parameters['Ellipsoid Orientation']\n ellipticity = image_parameters['Ellipticity']\n\n ### CALCULATE BACKGROUND\n # initialize the image at the background level\n image_background = ones((image_size, image_size)) * image_background_level\n\n # calculate matrix coordinates from the center of the image\n image_coordinate_x, image_coordinate_y = meshgrid(arange(0, image_size),\n arange(0, image_size),\n sparse=False,\n indexing='ij')\n\n # add gradient to image background\n image_background = image_background + gradient_intensity * (image_coordinate_x * sin(gradient_direction) +\n image_coordinate_y * cos(gradient_direction)) / (\n sqrt(2) * image_size)\n\n ### CALCULATE IMAGE PARTICLES\n image_particles = zeros((image_size, image_size))\n particle_intensities_for_SNR = []\n\n # calculate the particle profiles of all particles and add them to 
image_particles\n\n for particle_center_x, particle_center_y, particle_radius, particle_bessel_orders, particle_intensities, ellipsoidal_orientation in zip(\n particle_center_x_list, particle_center_y_list, particle_radius_list, particle_bessel_orders_list,\n particle_intensities_list, ellipsoidal_orientation_list):\n # calculate coordinates of cutoff window\n start_x = int(max(ceil(particle_center_x - particle_radius * 3), 0))\n stop_x = int(min(ceil(particle_center_x + particle_radius * 3), image_size))\n start_y = int(max(ceil(particle_center_y - particle_radius * 3), 0))\n stop_y = int(min(ceil(particle_center_y + particle_radius * 3), image_size))\n\n # calculate matrix coordinates from the center of the image\n image_coordinate_x, image_coordinate_y = meshgrid(arange(start_x, stop_x),\n arange(start_y, stop_y),\n sparse=False,\n indexing='ij')\n\n # calculate the elliptical distance from the center of the particle normalized by the particle radius\n rotated_distance_x = (image_coordinate_x - particle_center_x) * cos(ellipsoidal_orientation) + (\n image_coordinate_y - particle_center_y) * sin(ellipsoidal_orientation)\n rotated_distance_y = -(image_coordinate_x - particle_center_x) * sin(ellipsoidal_orientation) + (\n image_coordinate_y - particle_center_y) * cos(ellipsoidal_orientation)\n\n # The factor 2 is because the particle radius is defined as the point where the intensity reaches 1/3 of\n # the intensity in the middle of the particle when Bessel order = 0. When Bessel order = 1, the middle of\n # the particle is black, and at the radius the intensity is approximately at its maximum. For higher\n # Bessel orders, there is no clear definition of the radius.\n elliptical_distance_from_particle = 2 * sqrt((rotated_distance_x) ** 2\n + (rotated_distance_y / ellipticity) ** 2\n + .001 ** 2) / particle_radius\n\n # calculate particle profile.\n for particle_bessel_order, particle_intensity in zip(particle_bessel_orders, particle_intensities):\n image_particle = 4 * particle_bessel_order ** 2.5 * (bessel(particle_bessel_order,\n elliptical_distance_from_particle) / elliptical_distance_from_particle) ** 2\n image_particles[start_x:stop_x, start_y:stop_y] = image_particles[start_x:stop_x,\n start_y:stop_y] + particle_intensity * image_particle\n\n # calculate image without noise as background image plus particle image\n image_particles_without_noise = clip(image_background + image_particles, 0, 1)\n\n ### ADD NOISE\n image_particles_with_noise = poisson(\n image_particles_without_noise * signal_to_noise_ratio ** 2) / signal_to_noise_ratio ** 2\n\n cut_off_pixels = tuple([image_particles_with_noise > 1])\n\n percentage_of_pixels_that_were_cut_off = image_particles_with_noise[cut_off_pixels].size / (image_size ** 2) * 100\n\n # warn if there is a pixel brighter than 1\n def custom_formatwarning(msg, *args, **kwargs):\n # ignore everything except the message\n return str(msg) + '\\n'\n\n if percentage_of_pixels_that_were_cut_off > 0:\n warnings.formatwarning = custom_formatwarning\n warn_message = (\"Warning: %.5f%% of the pixels in the generated image are brighter than the 1 (%d pixels)! \"\n \"These were cut-off to the max value 1. 
Consider adjusting your gradient intensity, particle \"\n \"intensity, background level, or signal to noise ratio.\" % (\n percentage_of_pixels_that_were_cut_off, image_particles_with_noise[cut_off_pixels].size))\n warnings.warn(warn_message)\n\n # print(\"After poisson: Min is %.4f, Max is %.4f\" % (amin(image_particles_with_noise),\n # amax(image_particles_with_noise)))\n\n return clip(image_particles_with_noise, 0, 1)\n\n\ndef draw_image(img):\n from matplotlib import pyplot as plt\n plt.imshow(img, cmap='gray')\n plt.show()\n\n\ndef get_label(image_parameters=None):\n \"\"\"Create and return binary target image given image parameters\n Input: Image parameters\n Output: Array of size (image_x, image_y, number_of_features = 5), where the features at index i are:\n i = 0 - binary image (is there a particle here?)\n i = 1 - delta_x (to the particle center)\n i = 2 - delta_y\n i = 3 - radius\n i = 4 - intensity\n \"\"\"\n\n import numpy as np\n\n # A call placed in the default argument would run once at import time and\n # reuse the same random parameters for every call, so build it lazily.\n if image_parameters is None:\n image_parameters = get_image_parameters_preconfig()\n\n particle_center_x_list = image_parameters['Particle Center X List']\n particle_center_y_list = image_parameters['Particle Center Y List']\n particle_radius_list = image_parameters['Particle Radius List']\n image_size = image_parameters['Image Size']\n particle_intensities_list = image_parameters['Particle Intensities List']\n\n target_binary_image = np.zeros((image_size, image_size, 5))\n\n for particle_index in range(0, len(particle_center_x_list)):\n center_x = particle_center_x_list[particle_index]\n center_y = particle_center_y_list[particle_index]\n radius = particle_radius_list[particle_index]\n intensity = particle_intensities_list[particle_index]\n\n # loops over all pixels with center in coordinates = [floor(center - radius): ceil(center + radius)]. Adds the ones with\n # center within radius.\n for pixel_x in range(int(np.floor(center_x - radius)), int(np.ceil(center_x + radius))):\n for pixel_y in range(int(np.floor(center_y - radius)), int(np.ceil(center_y + radius))):\n if ((pixel_x - center_x) ** 2 + (pixel_y - center_y) ** 2 <= radius ** 2):\n # print('Pixel_x is: ' + str(pixel_x) + \". Pixel_y is: \" + str(pixel_y) + \".\")\n target_binary_image[pixel_x, pixel_y, 0] = 1\n target_binary_image[pixel_x, pixel_y, 1] = center_x - pixel_x\n target_binary_image[pixel_x, pixel_y, 2] = center_y - pixel_y\n target_binary_image[pixel_x, pixel_y, 3] = radius\n target_binary_image[pixel_x, pixel_y, 4] = intensity[0]\n\n return target_binary_image\n\n\ndef get_batch(get_image_parameters=lambda: get_image_parameters_preconfig(),\n batch_size=32):\n from numpy import zeros\n import time\n\n example_image_parameters = get_image_parameters()\n image_size = example_image_parameters['Image Size']\n image_batch = zeros((batch_size, image_size, image_size,\n 1)) # possibly save in smaller format? + Preallocating assumes equal image-sizes!\n label_batch = zeros((batch_size, image_size, image_size,\n 5)) # possibly save in smaller format? 
+ Preallocating assumes equal image-sizes!\n\n t = time.time()\n for i in range(batch_size):\n image_parameters = get_image_parameters()\n image_batch[i, :, :, 0] = get_image(image_parameters)\n label_batch[i, :, :, 0:5] = get_label(image_parameters)\n\n time_taken = time.time() - t\n\n print(\"Time taken for batch generation of size \" + str(batch_size) + \": \" + str(time_taken) + \" s.\")\n\n return image_batch, label_batch\n\n\ndef save_batch(batch, image_path='data', label_path='data', image_filename='image', label_filename='label'):\n import cv2\n import numpy as np\n import os\n\n (image_batch, label_batch) = batch\n (batch_size) = image_batch.shape[0]\n if not os.path.isdir(image_path):\n os.mkdir(image_path)\n print(\"Created path \" + image_path)\n if not os.path.isdir(label_path):\n os.mkdir(label_path)\n print(\"Created path \" + label_path)\n\n for i in range(batch_size):\n image = (image_batch[i] * 255).astype(np.uint8)\n cv2.imwrite(\"%s/%s%d.png\" % (image_path, image_filename, i), image)\n np.save(\"%s/%s%d\" % (label_path, label_filename, i), label_batch[i])\n\n return\n\n\ndef load_batch(batch_size, image_path='data', label_path='data', image_filename='image', label_filename='label'):\n from skimage.io import imread\n import numpy as np\n\n image_shape = imread(\"%s/%s%d.png\" % (image_path, image_filename, 0)).shape\n label_shape = np.load(\"%s/%s%d.npy\" % (label_path, label_filename, 0)).shape\n image_batch = np.zeros((batch_size,) + image_shape + (1,))\n label_batch = np.zeros((batch_size,) + label_shape)\n\n for j in range(batch_size):\n image_batch[j, :, :, 0] = imread(\"%s/%s%d.png\" % (image_path, image_filename, j))/255\n label_batch[j] = np.load(\"%s/%s%d.npy\" % (label_path, label_filename, j))\n\n return image_batch, label_batch\n\n\ndef generator_for_training(get_batch=lambda: get_batch(), aug_parameters=get_aug_parameters()):\n from keras.preprocessing.image import ImageDataGenerator\n import numpy as np\n (image_batch, label_batch) = get_batch()\n image_shape = image_batch.shape\n image_batch = np.reshape(image_batch, (image_shape[0], image_shape[1], image_shape[2], 1))\n label_batch = np.reshape(label_batch,\n (image_shape[0], image_shape[1], image_shape[2], -1)) # Expects a channel axis; -1 keeps all label feature channels\n print(image_batch.shape)\n\n data_generator = ImageDataGenerator(**aug_parameters)\n return data_generator.flow(image_batch, label_batch, batch_size=32)\n # As I understand it, batch_size here is how many augmented images it generates from the base data\n\n\ndef generator_for_training_load(image_path, label_path, aug_parameters=get_aug_parameters()):\n from keras.preprocessing.image import ImageDataGenerator\n image_datagenerator = ImageDataGenerator(**aug_parameters)\n label_datagenerator = ImageDataGenerator(**aug_parameters)\n\n # Provide the same seed and keyword arguments to the fit and flow methods\n seed = 1\n image_generator = image_datagenerator.flow_from_directory(\n image_path,\n class_mode=None,\n seed=seed)\n\n label_generator = label_datagenerator.flow_from_directory(\n label_path,\n class_mode=None,\n seed=seed)\n\n # combine generators into one which yields image and masks\n return zip(image_generator, label_generator)\n\n\ndef get_batch_load(filename):\n from skimage import io\n from numpy import reshape\n image_batch = io.imread(filename)\n image_batch = reshape(image_batch, (image_batch.shape[0], image_batch.shape[1], image_batch.shape[2], 1))\n return image_batch\n\n\ndef get_padding(input_size, n):\n \"\"\"Adds padding to the input image\n 
Inputs:\n input: the input image\n input_size: the size of the input image\n n: the input image dimensions are changed to be divisible by 2**n\n\n Outputs:\n padding: the padding that was used\n \"\"\"\n C0 = 2 ** (n - 1)\n C1 = 2 ** (n - 1)\n if (input_size[0] % 8 != 0):\n top_pad = (input_size[0] % (2 * n) // 2)\n bottom_pad = (input_size[0] % (2 * n) - top_pad)\n else:\n top_pad = 0\n bottom_pad = 0\n C0 = 0\n if input_size[1] % 8 != 0:\n left_pad = (input_size[1] % (2 * n) // 2)\n right_pad = (input_size[1] % (2 * n) - left_pad)\n else:\n left_pad = 0\n right_pad = 0\n C1 = 0\n padding = ((C0 - top_pad, C0 - bottom_pad), (C1 - left_pad, C1 - right_pad))\n\n return (padding)\n\n\ndef create_data_generator(get_image_parameters=lambda: get_image_parameters_preconfig(),\n epoch_batch_size=1000,\n batch_size=32,\n len=100):\n from keras.utils import Sequence\n\n class DataGenerator(Sequence):\n \"\"\"\n At the beginning of each epoch, generates a batch of size epoch_batch_size using get_image_parameters. Then,\n for each step, outputs a batch of size batch_size. This is done at most len times.\n \"\"\"\n\n def __init__(self,\n get_image_parameters=lambda: get_image_parameters_preconfig(),\n epoch_batch_size=1000,\n batch_size=32,\n len=100):\n 'Initialization'\n self.get_image_parameters = get_image_parameters\n self.epoch_batch_size = epoch_batch_size\n self.on_epoch_end()\n self.len = len\n self.batch_size = batch_size\n\n def on_epoch_end(self):\n self.batch = get_batch(self.get_image_parameters, self.epoch_batch_size)\n image_batch, label_batch = self.batch\n from matplotlib import pyplot as plt\n plt.imshow(image_batch[0, :, :, 0], cmap='gray')\n plt.show()\n plt.imshow(label_batch[0, :, :, 0], cmap='gray')\n plt.show()\n\n def __len__(self):\n return (self.len)\n\n def __getitem__(self, index):\n from random import randint\n image_indices = [randint(0, self.epoch_batch_size - 1) for i in range(self.batch_size)]\n image_batch, label_batch = self.batch\n return image_batch[image_indices], label_batch[image_indices]\n\n return DataGenerator(get_image_parameters, epoch_batch_size, batch_size, len)\n\n\ndef get_particle_centers(label):\n from skimage import measure\n from statistics import mean\n from numpy import argwhere\n (label_id, number_of_particles) = measure.label(label[:, :, 0], return_num=True)\n # Bra namn\n x_mean_list = []\n y_mean_list = []\n r_mean_list = []\n i_mean_list = []\n for particle_id in range(1, number_of_particles + 1):\n x_list = []\n y_list = []\n r_list = []\n i_list = []\n coords = argwhere(label_id == particle_id)\n for coord in coords:\n x_list.append(coord[0] + label[coord[0], coord[1], 1])\n y_list.append(coord[1] + label[coord[0], coord[1], 2])\n r_list.append(label[coord[0], coord[1], 3])\n i_list.append(label[coord[0], coord[1], 4])\n x_mean_list.append(mean(x_list))\n y_mean_list.append(mean(y_list))\n r_mean_list.append(mean(r_list))\n i_mean_list.append(mean(i_list))\n return (x_mean_list, y_mean_list, r_mean_list, i_mean_list)\n","sub_path":"DeepTrack 1.0/imageGeneration.py","file_name":"imageGeneration.py","file_ext":"py","file_size_in_byte":23955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"650506392","text":"import os\n\nimport cv2\n\nfrom processing import detector\nfrom processing import draw\nimport settings\n\n\nif __name__ == '__main__':\n for img_name in os.listdir(settings.PEOPLE_DIR):\n img_path = os.path.join(settings.PEOPLE_DIR, img_name)\n img = cv2.imread(img_path)\n for 
face, eyes in detector.detect_faces_and_eyes(img):\n draw.draw_rect(img, face)\n for eye in eyes:\n draw.draw_rect(img, eye)\n faces_landmarks = detector.detect_faces_with_landmarks(img)\n for face_landmarks in faces_landmarks:\n draw.draw_face_by_landmarks(img, face_landmarks)\n cv2.imshow(img_name, img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"352695872","text":"from __future__ import unicode_literals\nimport hashlib\n\nfrom django import forms\n\n\nNO_MORE_CONFIRMATION = 0\nNEW = 1\nACCEPTED = 2\nREJECTED = 3\nCANCELED = 4\n\nSTATUS_CHOICES = map(lambda c: (c, c), (\n NO_MORE_CONFIRMATION,\n NEW,\n ACCEPTED,\n REJECTED,\n CANCELED\n))\n\n\nclass ProcessPaymentForm(forms.Form):\n\n status = forms.ChoiceField(choices=(('OK', 'OK'), ('FAIL', 'FAIL')))\n id = forms.IntegerField()\n control = forms.IntegerField()\n t_id = forms.CharField()\n amount = forms.DecimalField()\n email = forms.EmailField(required=False)\n t_status = forms.TypedChoiceField(coerce=int, choices=STATUS_CHOICES)\n description = forms.CharField(required=False)\n md5 = forms.CharField()\n\n def __init__(self, payment, pin, **kwargs):\n super(ProcessPaymentForm, self).__init__(**kwargs)\n self.pin = pin\n self.payment = payment\n\n def clean(self):\n cleaned_data = super(ProcessPaymentForm, self).clean()\n if not self.errors:\n key_vars = (\n self.pin,\n str(cleaned_data['id']),\n str(cleaned_data['control']),\n str(cleaned_data['t_id']),\n str(cleaned_data['amount']),\n cleaned_data.get('email', ''),\n '', # service\n '', # code\n '', # username\n '', # password\n str(cleaned_data['t_status']))\n key = ':'.join(key_vars)\n md5 = hashlib.md5()\n md5.update(key.encode('utf-8'))\n key_hash = md5.hexdigest()\n if key_hash != self.cleaned_data['md5']:\n self._errors['md5'] = self.error_class(['Bad hash'])\n if cleaned_data['control'] != self.payment.id:\n self._errors['control'] = self.error_class(['Bad payment id'])\n return cleaned_data\n\n def save(self, *args, **kwargs):\n status = self.cleaned_data['t_status']\n self.payment.transaction_id = self.cleaned_data['t_id']\n self.payment.save()\n payment_status = self.payment.status\n if status == ACCEPTED:\n self.payment.captured_amount = self.payment.total\n self.payment.change_status('confirmed')\n elif ((status == NO_MORE_CONFIRMATION and payment_status == 'waiting')\n or status == REJECTED or status == CANCELED):\n self.payment.change_status('rejected')\n","sub_path":"pysar_payments/dotpay/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"444545259","text":"#!/usr/bin/env python\n# Copyright 2014 Rackspace\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the\n# License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS\n# IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport os\nimport sys\n\nimport setuptools\n\n\n# Utility function to read the README file\ndef readfile(filename):\n with open(filename) as f:\n return f.read()\n\n\n# Utility function to read requirements.txt files\ndef readreq(filename):\n result = []\n with open(filename) as f:\n for line in f:\n line = line.strip()\n\n # Process requirement file references\n if line.startswith('-r '):\n subfilename = line.split(None, 1)[-1].split('#', 1)[0].strip()\n if subfilename:\n result += readreq(subfilename)\n continue\n\n # Strip out \"-e\" prefixes\n if line.startswith('-e '):\n line = line.split(None, 1)[-1]\n\n # Detect URLs in the line\n idx = line.find('#egg=')\n if idx >= 0:\n line = line[idx + 5:]\n\n # Strip off any comments\n line = line.split('#', 1)[0].strip()\n\n # Save the requirement\n if line:\n result.append(line.split('#', 1)[0].strip())\n\n return result\n\n\n# Invoke setup\nsetuptools.setup(\n name='timid-github',\n version='0.1.0',\n author='Kevin L. Mitchell',\n author_email='kevin.mitchell@rackspace.com',\n url='https://github.com/rackerlabs/timid-github',\n description='Timid Github extension',\n long_description=readfile('README.rst'),\n license='Apache License (2.0)',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n py_modules=['timid_github'],\n install_requires=readreq('requirements.txt'),\n tests_require=readreq('test-requirements.txt'),\n entry_points={\n 'timid.extensions': [\n 'timid-github = timid_github:GithubExtension',\n ],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"569491811","text":"#!/usr/bin/env python\n\n__copyright__ = 'Copyright 2013-2014, http://radical.rutgers.edu'\n__license__ = 'MIT'\n\nimport os\nimport unittest\nimport radical.pilot as rp # noqa\nimport radical.utils as ru # noqa\n\n# ------------------------------------------------------------------------------\n#\n# READ the RADICAL-Pilot documentation: http://radicalpilot.readthedocs.org/\n#\n# ------------------------------------------------------------------------------\n\n#######################################\n# TestProjectUser #\n#######################################\n\n\nclass AcceptanceTests(unittest.TestCase):\n \"\"\"Implements the '00_getting_started.py' example in unittest\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Initialize tests, just creates instance variables needed.\"\"\"\n super(AcceptanceTests, cls).setUpClass()\n\n # Set-up the resource, hard-coding 'localhost' for now...\n cls.resource = None\n\n # Create a new session. No need to try/except this: if session creation\n # fails, there is not much we can do anyways...\n cls.session = None\n # Add a Pilot Manager. 
Pilot managers manage one or more ComputePilots.\n cls.pmgr = None\n # Create a UnitManager object.\n cls.umgr = None\n\n # Read in configuration\n cls.config = ru.read_json('%s/config.json' %\n os.path.dirname(os.path.abspath(__file__)))\n\n # Number of Compute Units (CUs)\n cls.n = 128 # number of units to run\n\n def setUp(self):\n \"\"\" Getting the resources is slow, to avoid calling it for each\n test use setUpClass() and store the result as class variable\n \"\"\"\n # Set-up the resource, hard-coding 'localhost' for now...\n self.resource = 'local.localhost'\n\n # Create a new session. No need to try/except this: if session creation\n # fails, there is not much we can do anyways...\n self.session = rp.Session()\n # Add a Pilot Manager. Pilot managers manage one or more ComputePilots.\n self.pmgr = rp.PilotManager(session=self.session)\n # Create a UnitManager object.\n self.umgr = rp.UnitManager(session=self.session)\n\n # Define an [n]-core local pilot that runs for [x] minutes\n # Here we use a dict to initialize the description object\n self.pd_init = {\n 'resource': self.resource,\n 'runtime': 15, # pilot runtime (min)\n 'exit_on_error': True,\n 'project': self.config[self.resource]['project'],\n 'queue': self.config[self.resource]['queue'],\n 'access_schema': self.config[self.resource]['schema'],\n 'cores': self.config[self.resource]['cores'],\n }\n\n def test_00_getting_started(self):\n \"\"\"Test a standard pilot run\"\"\"\n\n # Create description object from template description\n pilot_desc = rp.ComputePilotDescription(self.pd_init)\n\n # Launch the pilot.\n pilot = self.pmgr.submit_pilots(pilot_desc)\n\n self.umgr.add_pilots(pilot)\n\n # Create a workload of ComputeUnits.\n # Each compute unit runs '/bin/date'.\n cuds = list()\n for i in range(0, self.n):\n # create a new CU description, and fill it.\n # Here we don't use dict initialization.\n cud = rp.ComputeUnitDescription()\n cud.executable = '/bin/date'\n cuds.append(cud)\n\n # Submit the previously created ComputeUnit descriptions to the\n # PilotManager. 
This will trigger the selected scheduler to start\n # assigning ComputeUnits to the ComputePilots.\n units = self.umgr.submit_units(cuds)\n\n # Wait for all compute units to reach a final state (DONE, CANCELED or\n # FAILED).\n self.umgr.wait_units()\n\n # Verify that 100% of the units came back with 'DONE' status\n done_units = 0\n for description in units:\n if description.state == \"DONE\":\n done_units += 1\n self.assertEquals(\n (float(done_units) / float(self.n)), 1.0,\n \"Only {0}% of CUs were DONE.\"\n .format(str((float(done_units) / float(self.n)) * 100.00)))\n\n def test_01_unit_details(self):\n \"\"\"Test unit details, units has all details accessible via api\n \"\"\"\n\n # Detail keys to be checked in unit dictionary\n expected_detail_keys = [\n 'type',\n 'umgr',\n 'uid',\n 'name',\n 'state',\n 'exit_code',\n 'stdout',\n 'stderr',\n 'pilot',\n 'sandbox',\n 'description',\n ]\n\n # Create description object from template description\n pilot_desc = rp.ComputePilotDescription(self.pd_init)\n\n # Launch the pilot.\n pilot = self.pmgr.submit_pilots(pilot_desc)\n\n self.umgr.add_pilots(pilot)\n\n # Create a workload of ComputeUnits.\n # Each compute unit runs '/bin/date'.\n cuds = list()\n for i in range(1, self.n + 1):\n # create a new CU description, and fill it.\n # Here we don't use dict initialization.\n cud = rp.ComputeUnitDescription()\n cud.executable = '/bin/date'\n cuds.append(cud)\n\n # Submit the previously created ComputeUnit descriptions to the\n # PilotManager. This will trigger the selected scheduler to start\n # assigning ComputeUnits to the ComputePilots.\n units = self.umgr.submit_units(cuds)\n\n # Wait for all compute units to reach a final state (DONE, CANCELED or\n # FAILED).\n self.umgr.wait_units()\n\n # Not asserting for 100% completion, that is not the idea here...\n\n # Check that all items in the dictionary\n # match the expected keys and that all\n # values are *not NONE*\n for unit in units:\n unit_dict = unit.as_dict()\n for key, val in unit_dict.iteritems():\n self.assertIn(key, expected_detail_keys)\n self.assertIsNotNone(\n val, msg=\"'{0}' unexpectedly None\".format(key))\n\n def test_02_failing_units(self):\n \"\"\"Test failing units, about ~50% of the units will fail\"\"\"\n\n # Create description object from template description\n pilot_desc = rp.ComputePilotDescription(self.pd_init)\n\n # Launch the pilot.\n pilot = self.pmgr.submit_pilots(pilot_desc)\n\n self.umgr.add_pilots(pilot)\n\n # Create a workload of ComputeUnits.\n # Each compute unit runs '/bin/date'.\n # About ~50% of them will fail\n cuds = list()\n for i in range(1, self.n + 1):\n # create a new CU description, and fill it.\n # Here we don't use dict initialization.\n cud = rp.ComputeUnitDescription()\n if i % 2:\n cud.executable = '/bin/date'\n else:\n # trigger an error now and then\n cud.executable = '/bin/data' # does not exist\n cuds.append(cud)\n\n # Submit the previously created ComputeUnit descriptions to the\n # PilotManager. 
This will trigger the selected scheduler to start\n # assigning ComputeUnits to the ComputePilots.\n units = self.umgr.submit_units(cuds)\n\n # Wait for all compute units to reach a final state (DONE, CANCELED or\n # FAILED).\n self.umgr.wait_units()\n\n # Verify that >= 50% of the units came back with 'DONE' status\n # TODO: better checks for failures...\n done_units = 0\n for description in units:\n if description.state == \"DONE\":\n done_units += 1\n self.assertGreaterEqual(\n (float(done_units) / float(self.n)), 0.50,\n \"Only {0}% of CUs were DONE.\"\n .format(str((float(done_units) / float(self.n)) * 100.00)))\n\n def test_03_multiple_pilots(self):\n \"\"\"Test multiple pilots\"\"\"\n\n # Have to hard-code list of resources\n # TODO: get real list of resources\n resources = ['local.localhost']\n\n # Create multiple pilot descriptions, one for each resource\n pilot_descriptions = list()\n resource_count = len(resources)\n for resource in resources:\n pd_init = {\n 'resource': resource,\n 'runtime': 15, # pilot runtime (min)\n 'exit_on_error': True,\n 'project': self.config[resource]['project'],\n 'queue': self.config[resource]['queue'],\n 'access_schema': self.config[resource]['schema'],\n 'cores': self.config[resource]['cores'],\n }\n pilot_descriptions.append(rp.ComputePilotDescription(pd_init))\n\n # Launch the pilot.\n pilot = self.pmgr.submit_pilots(pilot_descriptions)\n pilot_count = len(pilot)\n\n self.umgr.add_pilots(pilot)\n\n # Create a workload of ComputeUnits.\n # Each compute unit runs '/bin/date'.\n cuds = list()\n for i in range(0, self.n):\n # create a new CU description, and fill it.\n # Here we don't use dict initialization.\n cud = rp.ComputeUnitDescription()\n cud.executable = '/bin/date'\n cuds.append(cud)\n\n # Submit the previously created ComputeUnit descriptions to the\n # PilotManager. 
This will trigger the selected scheduler to start\n # assigning ComputeUnits to the ComputePilots.\n units = self.umgr.submit_units(cuds)\n\n # Wait for all compute units to reach a final state (DONE, CANCELED or\n # FAILED).\n self.umgr.wait_units()\n\n # Verify that 100% of the units came back with 'DONE' status\n done_units = 0\n for description in units:\n if description.state == \"DONE\":\n done_units += 1\n self.assertEquals(\n (float(done_units) / float(self.n)), 1.0,\n \"Only {0}% of CUs were DONE.\"\n .format(str((float(done_units) / float(self.n)) * 100.00)))\n\n # Finally assert that the number of requested vs submitted pilots are\n # the same\n self.assertEquals(resource_count, pilot_count)\n\n def tearDown(self):\n # Close pilot session\n self.session.close(cleanup=True)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2, failfast=True, catchbreak=True)\n","sub_path":"tests/acceptance/test_acceptance.py","file_name":"test_acceptance.py","file_ext":"py","file_size_in_byte":10437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"256327490","text":"from .solution_1 import Node\r\nfrom .solution_1 import Solution\r\n\r\n\r\ndef test_preorder_case_one():\r\n # Nary-Tree input serialization is represented in their level order\r\n # traversal, each group of children is separated by the null value.\r\n\r\n # tree [1, null, 3, 2, 4, null, 5, 6]\r\n root = Node(\r\n 1,\r\n [\r\n Node(3, [Node(5), Node(6)]),\r\n Node(2),\r\n Node(4),\r\n ],\r\n )\r\n\r\n res = Solution().preorder(root)\r\n\r\n assert res == [1, 3, 5, 6, 2, 4]\r\n\r\n\r\ndef test_preorder_case_two():\r\n # tree [1, null, 2, 3, 4, 5, null, null, 6, 7, null, 8, null, 9,\r\n # 10, null, null, 11, null, 12, null, 13, null, null, 14]\r\n root = Node(\r\n 1,\r\n [\r\n Node(2),\r\n Node(3, [Node(6), Node(7, [Node(11, [Node(14)])])]),\r\n Node(4, [Node(8, [Node(12)])]),\r\n Node(5, [Node(9, [Node(13)]), Node(10)]),\r\n ],\r\n )\r\n\r\n res = Solution().preorder(root)\r\n\r\n assert res == [1, 2, 3, 6, 7, 11, 14, 4, 8, 12, 5, 9, 13, 10]\r\n","sub_path":"leetcode/question_589/test_solution_1.py","file_name":"test_solution_1.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"4670264","text":"# the imported packages are nothing unusual \nimport numpy as np\nimport numpy.fft as fft\nimport scipy.ndimage as nd\nimport scipy.misc as misc\nfrom math import pi\n\n#Read in source image\n# load grandpa Einstein's image, hah\nsource = nd.imread(\"einstein.bmp\", flatten=True)\n\n#Pad image to simulate oversampling\n# pad all four sides with black by w,h. hah\npad_len = len(source)\npadded = np.pad(source, ((pad_len, pad_len),(pad_len, pad_len)), 'constant', \n constant_values=((0,0),(0,0)))\n\n# 2D Fourier transform. 
ft = fft.fft2(padded)\n\n#simulate diffraction pattern\n# The code just computes the magnitude image.. the comment above is rather grand.\ndiffract = np.abs(ft)\n\n# Get the size of the padded image\nl = len(padded)\n\n#keep track of where the image is vs the padding\n# Build a zero-padded mask of ones, shaped like the padded image created above\nmask = np.ones((pad_len+2,pad_len+2))\nmask = np.pad(mask, ((pad_len-1, pad_len-1),(pad_len-1, pad_len-1)), 'constant', \n              constant_values=((0,0),(0,0)))\n\n#Initial guess using random phase info\n# Combine the measured Fourier magnitude with a random phase to form the initial signal\nguess = diffract * np.exp(1j * np.random.rand(l,l) * 2 * pi)\n\n#number of iterations\n# Repeat 800 times only.\nr = 801\n\n#step size parameter\n# Seems to play the role of a learning rate..\nbeta = 0.8\n\n#previous result\n# Loop s = 0..800\nprev = None\nfor s in range(0,r):\n    #apply fourier domain constraints\n    # Update: combine the measured Fourier magnitude with the previous phase\n    update = diffract * np.exp(1j * np.angle(guess)) \n    # Inverse transform of the updated Fourier signal\n    inv = fft.ifft2(update)\n    # Take the amplitude (real part)\n    inv = np.real(inv)\n    # Substitute for the previous amplitude\n    if prev is None:\n        prev = inv\n    \n    #apply real-space constraints\n    # ?? A somewhat puzzling passage\n    # Using abs() instead of real would make this unnecessary..?\n    temp = inv\n    for i in range(0,l):\n        for j in range(0,l):\n            #image region must be positive\n            if inv[i,j] < 0 and mask[i,j] == 1:\n                inv[i,j] = prev[i,j] - beta*inv[i,j]\n            #push support region intensity toward zero\n            if mask[i,j] == 0:\n                inv[i,j] = prev[i,j] - beta*inv[i,j]\n    \n    \n    prev = temp\n    \n    # Transform back to the Fourier domain.\n    guess = fft.fft2(inv)\n    \n    #save an image of the progress\n    # Save.\n    if s % 10 == 0:\n        misc.imsave(\"/Users/chasegoddard/Stuff/CDI/code/save/progress\" + str(s) +\n                    \".bmp\", prev)\n        print(s)\n\n\n","sub_path":"img-reconstruct.py","file_name":"img-reconstruct.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"105611325","text":"from flask import Flask, abort\nfrom flask.ext import restful\nfrom flask.ext.restful import reqparse\nfrom common.nagios_control import NagiosControl,NagiosControlError\nfrom common.cluster_host import ClusterHosts\nfrom common.flipper import ElasticSearchMaintenanceToggle\n\nclass NagiosScheduleDowntimeAPI(restful.Resource):\n    def __init__(self):\n        self.parser = reqparse.RequestParser()\n        self.parser.add_argument('cluster', type=str, required=True, help=\"Name of the target cluster\")\n        self.parser.add_argument('servicename', type=str, required=True, help=\"Name of the Nagios service\")\n        self.parser.add_argument('action', type=str, required=True, help=\"Nagios action to perform, e.g. schedule_downtime\")\n        self.parser.add_argument('duration', type=str, default=\"300\", help=\"Downtime duration in seconds\")\n        super(NagiosScheduleDowntimeAPI, self).__init__()\n\n    def put(self):\n        args = self.parser.parse_args() \n        hosts = ClusterHosts.get_hosts(args['cluster'])\n        data = \"duration:%s,service:%s\" % (args['duration'], args['servicename'])\n        action = NagiosControl(hosts, args['servicename'], args['action'], data)\n        action.perform_action()\n        if args['action'] == 'schedule_downtime':\n            action.check_downtime()\n        return \"Nagios has applied %s to service %s on cluster %s\" % (args['action'], args['servicename'], args['cluster'])\n\n##### ES full downtime\n    # def validate(self):\n    #     args = self.parser.parse_args()\n    #     if args['cluster'] not in ES_CLUSTERS:\n    #         raise UnknownClusterError(args['cluster'])\n    #     if args['operatingmode'] not in OPERATING_MODES:\n    #         raise UnknownModeError(args['operatingmode'])\n\n    # def put(self):\n    #     self.validate()\n    #     args = self.parser.parse_args() \n    #     hosts = ClusterHosts.get_hosts(args['cluster'])\n    #     esmt = ElasticSearchMaintenanceToggle(hosts)\n    #     if args['operatingmode'] == 'maintenance':\n    #         esmt.maintanence_mode()\n    #         return \"Put %s into maintenace mode\" % args['cluster']\n    #     else:\n    #         esmt.production_mode()\n    #         return \"Put %s into production mode\" % args['cluster']\n    #     return \"Do nothing!!\"\n\n# class PagerDutyIncidentAPI(restful.Resource):\n#     def __init__(self):\n#         self.parser = reqparse.RequestParser()\n#         self.parser.add_argument('user', type=str, default=\"PI8NLOA\", location=\"json\")\n#         super(TaskAPI, self).__init__()\n#     def put(self,incidentNumber):\n#         pass","sub_path":"resources/nagios.py","file_name":"nagios.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"1017203","text":"from rest_framework import status\n\nfrom .article_tests_base_class import ArticlesBaseTest\nfrom .test_data import *\nfrom authors.apps.articles.models import Rating, Article\n\n\nclass ArticleRatingTestCase(ArticlesBaseTest):\n    \"\"\"Test rating of articles\"\"\"\n\n    def test_valid_rating(self):\n        \"\"\"Tests for a valid rating post request.\"\"\"\n        res = self.client.post(self.rate_url,\n                               valid_rate_data1, **self.header_user2, format='json')\n        self.assertEqual(res.data, valid_data_rasponse)\n        self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n    def test_rate_non_existent_article(self):\n        \"\"\"Tests a post request for a non-existent article.\"\"\"\n        res = self.client.post(self.non_existent_article,\n                               valid_rate_data1, **self.header_user2, format='json')\n        self.assertEqual(res.data, non_existent_article_response)\n        self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_author_rating(self):\n        \"\"\"Tests for a post request where the user is also the article author.\"\"\"\n        res = self.client.post(self.rate_url,\n                               valid_rate_data1, **self.header_user1, format='json')\n        self.assertEqual(res.data, author_rating_data_response)\n        self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)\n\n    def test_rating_twice(self):\n        \"\"\"Tests for a post request by a user to rate an article they have already rated.\"\"\"\n        self.client.post(self.rate_url,\n                         valid_rate_data1, **self.header_user2, format='json')\n        res = self.client.post(self.rate_url,\n                               valid_rate_data1, **self.header_user2, format='json')\n        self.assertEqual(res.data, rating_twice_data_response)\n        self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)\n\n    def test_reviews(self):\n        \"\"\"Tests for a valid get request to fetch all reviews.\"\"\"\n        
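# Post a rating first so the GET below has at least one review to return.\n        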
self.client.post(self.rate_url,\n                         valid_rate_data1, **self.header_user2, format='json')\n        res = self.client.get(self.get_rate_url, **self.header_user2, format='json')\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n    def test_no_reviews(self):\n        \"\"\"Tests for a get request to fetch non-existent reviews.\"\"\"\n        self.client.post(self.rate_url,\n                         valid_rate_data2, **self.header_user2, format='json')\n        res = self.client.get(self.get_rate_url, **self.header_user2, format='json')\n        self.assertEqual(res.data, no_review_data_response)\n        self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_article_not_rated(self):\n        \"\"\"Tests for an article that has not been rated when getting articles.\"\"\"\n        res = self.client.get(self.article_rating, **self.header_user2, format='json')\n        self.assertEqual(res.data['results'][0]['average_rating'], no_ratings_data_response)\n\n    def test_get_non_existent_article(self):\n        \"\"\"Tests for a request to get the rating of a non-existent article.\"\"\"\n        res = self.client.get(self.get_non_existent_article, **self.header_user2, format='json')\n        self.assertEqual(res.data, non_existent_article_response)\n        self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_get_rated_article(self):\n        \"\"\"Tests for an article that has been rated when getting articles.\"\"\"\n        self.client.post(self.rate_url,\n                         valid_rate_data1, **self.header_user2, format='json')\n        res = self.client.get(self.article_rating, **self.header_user2, format='json')\n        self.assertEqual(res.data['results'][0]['average_rating'], 2.0)\n\n    def test_update_rating(self):\n        \"\"\"Tests for a valid request to update a rating.\"\"\"\n        self.client.post(self.rate_url,\n                         valid_rate_data1, **self.header_user2, format='json')\n        res = self.client.put(self.rate_url, update_rating_data, **self.header_user2, format='json')\n        self.assertEqual(res.data, update_rating_data_response)\n    \n    def test_update_not_rated_article(self):\n        \"\"\"Tests updating a rating for an article the user has not rated.\"\"\"\n        self.client.post(self.rate_url,\n                         valid_rate_data1, **self.header_user1, format='json')\n        res = self.client.put(self.rate_url, update_rating_data, **self.header_user2, format='json')\n        self.assertEqual(res.data, not_rated_data_response)\n\n    def test_update_non_existent_article(self):\n        \"\"\"Tests updating a rating for a non-existent article.\"\"\"\n        res = self.client.put(self.non_existent_article, update_rating_data, **self.header_user2, format='json')\n        self.assertEqual(res.data, non_existent_article_response)\n\n    def test_model_representation(self):\n        retrieved_article = Article.objects.filter(slug=self.slug).first()\n        retrieved_article.published = True\n        retrieved_article.save()\n        self.rating = Rating.objects.create(article=retrieved_article, user= self.user2,\n                                            value=1, review=\"no way\")\n        self.assertEqual(str(self.rating), \"This is rating no: 1\")\n","sub_path":"authors/apps/articles/tests/test_article_rating.py","file_name":"test_article_rating.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"638106367","text":"#coding:utf-8\nfrom socket import *\nimport threading\nimport time\nimport logging\n\n# Create the socket, bind it to the port, and start listening\ntcpSerPort = 8899\ntcpSerSock = socket(AF_INET, SOCK_STREAM)\n\n# Test URLs\n# http://www.hitsz.edu.cn/UserFiles/banner/orig/1574407474192.jpg\n# https://2.python-requests.org//zh_CN/latest/user/quickstart.html\n\n\ncache_map = {}\n\nimport requests\n\n\ndef worker(tcpCliSock):\n    
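# Handle a single client connection: parse the request, fetch the page (or reuse the cache), and reply.\n    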
print('thread name is %s ......' % (threading.current_thread().name))\n    message = tcpCliSock.recv(4096).decode()\n    try:\n        # Parse the requested URL on the proxy server\n        msg_list = message.split()\n        if not msg_list:\n            return\n        filename = msg_list[1].strip('/')\n        if not filename.startswith('http'):\n            filename = 'http://' + filename\n\n        ret = b''\n        if filename in cache_map:\n            print(\"hit cache========\")\n            ret = cache_map[filename]\n        else:\n            r_get = requests.get(filename)\n\n            headers = \"HTTP/1.1 200 OK\\r\\n\"\n            for k, v in r_get.headers.items():\n                if k in ['Content-Type', 'Content-Length', 'Cache-Control', 'Content-Language', 'X-Content-Type-Options',\n                         'Transfer-Encoding', 'Connection', 'Date', 'X-Frame-Options', 'Server', 'Set-Cookie']:\n                    headers += '{k}: {v}\\r\\n'.format(k=k,v=v)\n            headers += '\\r\\n'\n            # the response body is bytes, so encode the header string before concatenating\n            ret = headers.encode() + r_get.content\n            cache_map[filename] = ret\n\n        print('start send data\\n')\n        logging.info('start send data\\n')\n        tcpCliSock.sendall(ret)\n        tcpCliSock.close()\n        print('finish send, sleep 10s')\n        time.sleep(10)\n    except Exception as e:\n        print('exception:')\n        print(e)\n        tcpCliSock.close()\n\n\nif __name__ == '__main__':\n    # Prepare a server socket\n    tcpSerSock.bind(('', tcpSerPort))\n    tcpSerSock.listen(5)\n\n    try:\n        while True:\n            # Start accepting requests from clients\n            print('waiting for client connect...')\n            tcpCliSock, addr = tcpSerSock.accept()\n            t = threading.Thread(target=worker, args=[tcpCliSock], name=addr)\n            t.start()\n            # t.join()\n    except Exception as e:\n        print(e)\n\n    tcpSerSock.close()\n","sub_path":"web_proxy_thread.py","file_name":"web_proxy_thread.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"552934987","text":"db = DAL('sqlite://storage.sqlite')\n\n#auth\nfrom gluon.tools import *\nauth = Auth(db)\nauth.define_tables()\ncrud = Crud(db)\n\ndb.define_table('person',\n    Field('name', unique=True),\n    Field('email'),\n    Field('phone'),\n    format = '%(person)s')\n\n\n#database for projects\ndb.define_table('project',\n    Field('person_id', 'reference person'),\n    Field('title'),\n    Field('body', 'text'),\n    Field('image', 'upload'),\n    Field('created_on', 'datetime', default=request.now),\n    Field('created_by', 'reference auth_user', default=auth.user_id),\n    Field('category'),\n    format='%(title)s')\n\n#db for documents on each project\ndb.define_table('document',\n    Field('project_id', 'reference project'),\n    Field('name'),\n    Field('file', 'upload'),\n    Field('created_on', 'datetime', default=request.now),\n    Field('created_by', 'reference auth_user', default=auth.user_id),\n    format='%(name)s')\n\n#db for posts on each project\ndb.define_table('post',\n    Field('project_id', 'reference project'),\n    Field('body', 'text'),\n    Field('created_on', 'datetime', default=request.now),\n    Field('created_by', 'reference auth_user', default=auth.user_id))\n\n#Person validators\ndb.person.name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, db.person.name)]\ndb.person.email.requires = [IS_NOT_EMPTY(), IS_EMAIL(error_message='Please enter a valid email!')]\ndb.person.phone.requires = IS_MATCH('^1?((-)\\d{3}-?|\\(\\d{3}\\))\\d{3}-?\\d{4}$', error_message='Please enter in form 1-123-456-7899')\n\n\ndb.project.title.requires = IS_NOT_IN_DB(db, 'project.title')\ndb.project.body.requires = IS_NOT_EMPTY()\ndb.project.person_id.readable = db.project.person_id.writable = False\ndb.project.created_by.readable = db.project.created_by.writable = False\ndb.project.created_on.readable = db.project.created_on.writable = False\ndb.project.category.requires = 
IS_IN_SET([\"Music\", \"Applications\", \"Games\", \"Movies\", \"Other\"], zero=None)\n\ndb.post.body.requires = IS_NOT_EMPTY()\ndb.post.project_id.readable = db.post.project_id.writable = False\ndb.post.created_by.readable = db.post.created_by.writable = False\ndb.post.created_on.readable = db.post.created_on.writable = False\n\ndb.document.name.requires = IS_NOT_IN_DB(db, 'document.name')\ndb.document.project_id.readable = db.document.project_id.writable = False\ndb.document.created_by.readable = db.document.created_by.writable = False\ndb.document.created_on.readable = db.document.created_on.writable = False\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"167185581","text":"# Uses python3\nimport sys\n\n# Task: The goal in this problem is to find the minimum number of coins needed to change the input value (an integer)\n# into coins with denominations 1, 5, and 10\n# Input: The input consists of a single integer m\n# Constraints: 1 <= m <= 10^3\n# Output: Output the minimum number of coins with the denominations 1,5, 10 that changes m\n#\n# Examples\n# Input: 28\n# Ouptut: 6 (1+1)\n# Explanation: 28 = 10 +10 + 5 + 1 + 1 + 1\n\ndef get_change(m):\n dimes = int(m/10)\n remainder = m-(dimes*10)\n nickels = int(remainder/5)\n remainder -= nickels*5\n\n return dimes+nickels+remainder\n\nif __name__ == '__main__':\n #m = int(sys.stdin.read())\n m = int(input(\"Enter int value: \"))\n print(get_change(m))\n","sub_path":"GreedyAlgorithms/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"226160955","text":"import torch\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom torch.autograd import Variable\n\nfrom utils.utils import *\nfrom models.model import *\n\n\ndef extract_relus(model):\n relus = []\n for _, module in list(model._modules.items()):\n for _, module in list(module._modules.items()):\n if isinstance(module, nn.ReLU):\n relus.append(module)\n return relus\n\n\nclass Hook():\n def __init__(self, module, backward=False):\n if backward==False:\n self.hook = module.register_forward_hook(self.hook_fn)\n else:\n self.hook = module.register_backward_hook(self.hook_fn)\n def hook_fn(self, module, input, output):\n self.input = input\n self.output = output\n def close(self):\n self.hook.remove()\n\n# load models\nencoder = Encoder()\ndecoder = Decoder()\nencoder.load_state_dict(torch.load('saved_models/encoder.pth'))\ndecoder.load_state_dict(torch.load('saved_models/decoder.pth'))\nencoder.eval()\ndecoder.eval()\n\n\nimsize = -1\nPATH = 'data/snail.jpg'\nsigma = 25\nsigma_ = sigma/255.\nlr = 0.1\nopt_steps = 200\n\n##########################\n# which layer and filter to visualize\n##########################\nlayer = -2\n# filter = 3\n\nimg_np = pil_to_np(crop_image(get_image(PATH, imsize)[0], d=32))\n\n# hook relus\nrelus = extract_relus(encoder) + extract_relus(decoder)\nhookF = [Hook(layer) for layer in relus]\n\n\nfor filter in range(16):\n noisy_img = np.clip(np.random.normal(scale=sigma, size=img_np.shape), 0, 1).astype(np.float32)\n\n # make noisy_img a variable\n noisy_img_torch = Variable(np_to_torch(noisy_img), requires_grad=True)\n optimizer = torch.optim.Adam([noisy_img_torch], lr=lr)\n\n for i in range(opt_steps):\n optimizer.zero_grad()\n latent = encoder(noisy_img_torch)\n recon = 
decoder(latent)\n\n        # former = torch_to_np(noisy_img_torch.clone())\n        output = [hook.output for hook in hookF][layer][0][filter]\n        loss = -output.mean()\n        print(loss)\n        loss.backward()\n        optimizer.step()\n\n    optimum = np.clip(torch_to_np(noisy_img_torch), 0, 1).astype(np.float32)\n    plt.figure()\n    plt.imshow(optimum.transpose(1,2,0))\n    # plt.show()\n    plt.savefig(\"layer_visualizations/second-to-last-layer-\" + str(filter) + '.png')\n","sub_path":"layer_viz.py","file_name":"layer_viz.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"340791425","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on %(date)s\r\n\r\n@author: %(irnmn)s\r\n\"\"\"\r\n\r\n\"\"\"\r\n\r\nGiven a non-negative integer num, return the number of steps to reduce it to zero. If the current number is even, you have to divide it by 2, otherwise, you have to subtract 1 from it.\r\n\r\n\"\"\"\r\n\r\n\r\ndef numberOfSteps (num):\r\n    \r\n    num_step= 0\r\n    \r\n    while num != 0 :\r\n        if num % 2 == 0:\r\n            num = num/2\r\n            num_step +=1\r\n        else:\r\n            num = num-1\r\n            num_step+=1\r\n    return num_step\r\n    \r\n    \r\n\r\nprint(numberOfSteps(14))\r\n\r\n\r\n#%%\r\n\r\n\r\ndef numberOfSteps (num):\r\n    \r\n    c =0\r\n    while num != 0:\r\n        \r\n        num, c = num-1 if num % 2 else num//2, c+1\r\n    return c\r\n    \r\n    \r\nprint(numberOfSteps(14))\r\n\r\n","sub_path":"Easy/1342_num_of_Steps_to_reduce_zero.py","file_name":"1342_num_of_Steps_to_reduce_zero.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"528685588","text":"m, n = map(int, input().split(\" \"))\nhigh = n\n\n# Sieve of Eratosthenes\ncheck = [False, False] + [True] * (high - 1)\n\nfor i in range(2, high + 1):\n    if check[i]:\n        for j in range(2 * i, high + 1, i):\n            check[j] = False\n\nfor i in range(m, n + 1):\n    if check[i]:\n        print(i)\n","sub_path":"1929_소수 구하기.py","file_name":"1929_소수 구하기.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"550253682","text":"'''\nPURPOSE : EE2703 - Assignment 8\nAUTHOR : Manvar Nisharg (EE19B094)\nINPUT : NULL\nOUTPUT : Spectrum of various functions\n'''\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nfrom pylab import *\nfrom numpy.fft import fftshift, fft, ifft, ifftshift\n\n#Plotting function\ndef customplot(func,x,y,xlim):\n    plt.figure()\n\n    plt.subplot(2,1,1)\n    plt.plot(x,np.abs(y),lw=2)\n    plt.xlim(-1*xlim,xlim)\n    plt.ylabel(r\"$|y|$\")\n    plt.title(f\"Spectrum of %s\"%func)\n    plt.grid(True)\n\n    plt.subplot(2,1,2)\n    #plt.plot(x,np.angle(y),'go',lw=2)\n    ii = np.where(np.abs(y)>1e-3)\n    plt.plot(x[ii], np.angle(y[ii]),'ro',lw=2)\n    plt.xlim(-1*xlim,xlim)\n    plt.ylim(-5,5)\n    plt.xlabel(r\"$k$\")\n    plt.ylabel(r\"Phase of $Y$\")\n    plt.grid(True)\n    plt.show()\n\n#Cos^3 function\ncos3 = lambda x : np.cos(x)**3\n\n#Sin^3 function\nsin3 = lambda x : np.sin(x)**3\n\n#Gaussian function\ngauss = lambda x : np.exp(-x**2/2)\n\n#cos(20x + 5cos(x)) function\ncoscos = lambda x : np.cos(20*x+5*np.cos(x))\n\n#Sin(5x) function\nsin5 = lambda x : np.sin(5*x)\n\n#Modulated signal\nmodulated = lambda x : (1+0.1*np.cos(x))*np.cos(10*x)\n\n#Dictionary of functions\nfunc_dict = {'sin':sin5,\n\t\t\t'modul':modulated,\n\t\t\t'cos^3' : cos3,\n\t\t\t'sin^3' : sin3,\n\t\t\t'coscos' : coscos,\n\t\t\t'gauss' : gauss }\n\n#Function to perform dft and plot spectrum\ndef perform_dft(func,N=512,steps 
= 513, r=4*np.pi, phase_limit=1e-3, xlim=40, w_lim=64):\n\tt = np.linspace(-r,r,steps)[:-1]\t#Time range\n\ty = func_dict[func](t)\t\t\t\t#Sampled function values\n\tY = fftshift(fft(y))/N \t\t\t\t#Shifting freq\n\tw = np.linspace(-w_lim,w_lim,steps)[:-1]\t#Frequency values to be plotted\n\n\tcustomplot(func,w,Y,xlim)\n\t\n\n\ndef perform_dft_gaussian(func,tolerance=1e-6,N=128):\n\tT = 8*np.pi\n\tY_old = 0\n\n\twhile 1:\n\n\t\t#Time resolution\n\t\tdt = T/N\n\t\t#Frequency resolution\n\t\tdw = 2*np.pi/T\n\n\t\t#Freq window size\n\t\tW = N*dw\n\n\t\t#Time samples\n\t\tt = np.linspace(-T/2,T/2,N+1)[:-1]\n\t\t#Freq samples\n\t\tw = np.linspace(-W/2,W/2,N+1)[:-1]\n\n\t\ty = gauss(t)\n\n\t\tY_new = dt/(2*np.pi) * fftshift(fft(ifftshift(y)))\n\n\t\terror = sum(abs(Y_new[::2]) - Y_old)\n\t\tY_old = Y_new\n\n\t\tif error < tolerance:\n\t\t\tcustomplot(func,w,Y_new,5)\n\t\t\tprint(\"Error in Gaussian case is {}\".format(error))\n\t\t\treturn\n\n\t\tT*=2\n\t\tN*=2\n\n#Calculate error between actual and inversed values of dft of series of random values\nx=np.random.rand(100)\nX=fft(x)\ny=ifft(X)\nc_[x,y]\nmaxError = max(np.abs(y-x))\nprint('Magnitude of maximum error between actual and computed values of the random sequence:', maxError)\n\n#DFT of various functions\nperform_dft('sin',xlim=10)\nperform_dft('modul',xlim=40)\nperform_dft('cos^3',xlim=15, steps= 129 , w_lim=16, N = 128)\nperform_dft('sin^3',xlim=15)\nperform_dft('coscos',xlim=40)\nperform_dft_gaussian('gauss')","sub_path":"Tutorial8/EE2703_ASSIGN8_EE19B094.py","file_name":"EE2703_ASSIGN8_EE19B094.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"367367080","text":"import sys\nimport threading\nimport time\nsys.path.append('that/apollo/apollo_agent/Scr/python/Lib/')\nfrom apollolog import ApolloLog\nimport db.store\nimport json\nimport requests\ndb.store.load_config('salt_proxy.conf')\n\nsalt = db.store.salt_proxy\ndata = {}\ndata['username'] = \"rongjunfeng\"\ndata['operation_time'] = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))\ndata['exec_time'] = int(time.time())\ndata['target'] = \"931,1022,918,914,921,917,910,935\"\ndata['func'] = \"cmd.run\"\ndata['args'] = \"rpm -qa|grep salt-ganjisrv\"\ndef add_task():\n\n sql = \"insert into mon_deploy_operation (user_name,operation_time,exec_time,target,func,args) value ('%s','%s',%d,'%s','%s','%s')\" % (data['username'],data['operation_time'],data['exec_time'],data['target'] ,data['func'],data['args'])\n r = salt.execute(sql)\n\ndef factory():\n ret = {}\n url = \"http://resource.apollo.corp.ganji.com/Service/API\"\n values = {\"api\":'ResourceScript','action':'getHostByServerId'}\n for i in data['target'].split(\",\"):\n values['server_id'] = i\n r = requests.get(url,params = values)\n sql = \"insert into mon_deploy_job_return (operation_id,id,hostname) values (%d,'%s','%s')\" % (1,i,r.content)\n r = salt.execute(sql)\nadd_task()\n#factory()\n\n","sub_path":"test/apollo/apollo_agent/Scr/python/salt_api/add_salt_job.py","file_name":"add_salt_job.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"50981521","text":"import time, random\n\nok = False\n\nwhile(not ok):\n try:\n minnum = int(input('최솟값 입력:'))\n maxnum = int(input('최댓값 입력:'))\n except: minnum = 1; maxnum = 0\n\n if minnum > maxnum:\n print('올바른 값을 입력해 주십시오')\n elif (maxnum - minnum < 1):\n print('최솟값과 최댓값의 차는 1보다 커야 
합니다')\n    else:\n        ok = True\n\ncleared = False\n\nans = random.randrange(minnum, maxnum+1)\ntrial = 0\n\nwhile(not cleared):\n    print(minnum, ',', maxnum, '사이의 숫자를 입력하십시오.')\n\n    ok = False\n    while(not ok):\n        try:\n            inp = int(input('입력:'))\n            ok = True\n        except: print('올바른 값을 입력해 주십시오')\n\n    print('흠', end='')\n    for i in range(1,random.randrange(3,20)):\n        print('.', end='')\n        time.sleep(random.randrange(1,10)/15)\n    print('')\n\n    if inp == ans:\n        cleared = True\n    elif inp < ans:\n        if abs(inp-ans) <= 10:\n            print('입력한 숫자가 약간 작습니다')\n        else:\n            print('입력한 숫자가 작습니다')\n    elif inp > ans:\n        if abs(inp-ans) <= 10:\n            print('입력한 숫자가 약간 큽니다')\n        else:\n            print('입력한 숫자가 큽니다')\n    print('---------------------------------------------')\n    trial += 1\n\nprint('클리어! 정답:', ans)\n","sub_path":"updown.py","file_name":"updown.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"528493705","text":"import hashlib, json, math, os\nfrom . import *\nfrom datetime import datetime\nfrom flask import Blueprint\nfrom flask_jwt_extended import jwt_required, get_jwt_claims\nfrom flask_restful import Api, Resource, reqparse, marshal\nfrom mailjet_rest import Client\n\nfrom blueprints import app, db, user_required\nfrom blueprints.cart.model import Carts, CartDetails\nfrom blueprints.product.model import Products\nfrom blueprints.user.model import Users\n\nbp_user = Blueprint(\"user\", __name__)\napi = Api(bp_user)\n\nclass UserProfileResource(Resource):\n    \"\"\"\n    A class used to represent user's profile\n\n    Methods\n    -------\n    options(id=None)\n        Return status ok when get hit\n    get()\n        Show user's profile (username and email)\n    post()\n        Change user's email\n    put()\n        Change user's password\n    \"\"\"\n\n    def options(self, id=None):\n        return {\"status\": \"ok\"}, 200\n\n    @jwt_required\n    @user_required\n    def get(self):\n        \"\"\"\n        Show user's profile (username and email)\n        Only user can see his/her profile\n        \"\"\"\n        claims = get_jwt_claims()\n        user = Users.query.get(claims[\"id\"])\n        result = {\n            \"id\": user.id,\n            \"username\": user.username,\n            \"email\": user.email\n        }\n        return result, 200, {\"Content-Type\": \"application/json\"}\n    \n    @jwt_required\n    @user_required\n    def post(self):\n        \"\"\"\n        Change user's email\n        Only user can change his/her email\n\n        JSON Inputs\n        -----------\n        email : str\n            User's new email\n        \"\"\"\n        parser = reqparse.RequestParser()\n        parser.add_argument(\"email\", location=\"json\")\n        args = parser.parse_args()\n        claims = get_jwt_claims()\n        user = Users.query.get(claims[\"id\"])\n        user.email = args[\"email\"]\n        db.session.commit()\n        return {\"status\": \"SUCCESS\"}, 200, {\"Content-Type\": \"application/json\"}\n    \n    @jwt_required\n    @user_required\n    def put(self):\n        \"\"\"\n        Change user's password\n        Only user can change his/her password\n        Return status 401 when user's input is different from user's password in database\n\n        JSON Inputs\n        -----------\n        old_password : str\n            User's old password\n        new_password : str\n            User's new password\n        \"\"\"\n        parser = reqparse.RequestParser()\n        parser.add_argument(\"old_password\", location=\"json\")\n        parser.add_argument(\"new_password\", location=\"json\")\n        args = parser.parse_args()\n        claims = get_jwt_claims()\n        user = Users.query.get(claims[\"id\"])\n        test_password = hashlib.md5(args[\"old_password\"].encode()).hexdigest()\n        if test_password == user.password:\n            new_password = hashlib.md5(args[\"new_password\"].encode()).hexdigest()\n            user.password = new_password\n            
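# Persist the new password hash.\n            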
db.session.commit()\n            return {\"status\": \"SUCCESS\"}, 200, {\"Content-Type\": \"application/json\"}\n        else:\n            return {\"status\": \"UNAUTHORIZED\"}, 401, {\"Content-Type\": \"application/json\"}\n\nclass UserTransactionResource(Resource):\n    \"\"\"\n    A class used to represent user's transaction history\n\n    Methods\n    -------\n    options(id=None)\n        Return status ok when get hit\n    get()\n        Show user's transaction history\n    \"\"\"\n\n    def options(self, id=None):\n        return {\"status\": \"ok\"}, 200\n    \n    @jwt_required\n    @user_required\n    def get(self):\n        \"\"\"\n        Show user's transaction history\n        Only user can see his/her transaction history\n        Pagination (5 records per page)\n\n        Args Inputs\n        -----------\n        page : int\n            Page number (for pagination)\n        \"\"\"\n        parser = reqparse.RequestParser()\n        parser.add_argument(\"page\", location=\"args\", type=int, default=1)\n        args = parser.parse_args()\n        claims = get_jwt_claims()\n        transactions = Carts.query.filter_by(user_id=claims[\"id\"]).filter_by(status=True).order_by(Carts.id.desc())\n        offset = (args[\"page\"]-1)*5\n        qry_result = []\n        for transaction in transactions.limit(5).offset(offset):\n            transaction_dict = marshal(transaction, Carts.response_fields)\n            details = CartDetails.query.filter_by(cart_id=transaction.id)\n            transaction_details = []\n            for detail in details:\n                product = Products.query.get(detail.product_id)\n                detail_dict = marshal(detail, CartDetails.response_fields)\n                product_dict = marshal(product, Products.response_fields)\n                detail_dict[\"product\"] = product_dict\n                transaction_details.append(detail_dict)\n            transaction_dict[\"details\"] = transaction_details\n            qry_result.append(transaction_dict)\n        result = {\n            \"page\": args[\"page\"],\n            \"total_page\": math.ceil(transactions.count()/5),\n            \"data\": qry_result\n        }\n        return result, 200, {\"Content-Type\": \"application/json\"}\n\nclass CartResource(Resource):\n    \"\"\"\n    A class used to represent user's cart (or transaction)\n\n    Methods\n    -------\n    options(id=None)\n        Return status ok when get hit\n    get()\n        Show list of product and its quantity in user's cart\n    \"\"\"\n\n    def options(self, id=None):\n        return {\"status\": \"ok\"}, 200\n\n    @jwt_required\n    @user_required\n    def get(self):\n        \"\"\"\n        Show list of product and its quantity in user's cart\n        Only user can see his/her cart\n        \"\"\"\n        claims = get_jwt_claims()\n        cart = Carts.query.filter_by(user_id=claims[\"id\"]).filter_by(status=False).first()\n        if cart != None:\n            album_list = CartDetails.query.filter_by(cart_id=cart.id)\n            cart_details = []\n            for album in album_list:\n                album_dict = marshal(album, CartDetails.response_fields)\n                album_dict[\"product\"] = marshal(Products.query.get(album.product_id), Products.response_fields)\n                cart_details.append(album_dict)\n            result = {\n                \"cart\": marshal(cart, Carts.response_fields),\n                \"cart_details\": cart_details\n            }\n            return result, 200, {\"Content-Type\": \"application/json\"}\n        else:\n            return {\"status\": \"EMPTY\"}, 200, {\"Content-Type\": \"application/json\"}\n\nclass DeleteProductFromCartResource(Resource):\n    \"\"\"\n    A class used to contain user's action to remove product from his/her cart\n\n    Methods\n    -------\n    options(id=None)\n        Return status ok when get hit\n    delete(id)\n        Remove product from user's cart by its id\n    \"\"\"\n\n    def options(self, id=None):\n        return {\"status\": \"ok\"}, 200\n\n    @jwt_required\n    @user_required\n    def delete(self, id):\n        \"\"\"\n        Remove product from user's cart by its id\n        Only user can do this action\n\n        Parameters\n        ----------\n        id : int\n            Product's id in 
database\n \"\"\"\n claims = get_jwt_claims()\n cart = Carts.query.filter_by(user_id=claims[\"id\"]).filter_by(status=False).first()\n row = CartDetails.query.filter_by(cart_id=cart.id).filter_by(product_id=id).first()\n if row != None:\n db.session.delete(row)\n db.session.commit()\n cart = Carts.query.get(row.cart_id)\n product = Products.query.get(row.product_id)\n cart.total -= row.qty * product.price\n db.session.commit()\n return {\"status\": \"DELETED\"}, 200, {\"Content-Type\": \"application/json\"}\n else:\n return {\"status\": \"EMPTY\"}, 200, {\"Content-Type\": \"application/json\"}\n\nclass DeliveryDetailResource(Resource):\n \"\"\"\n A class used to contain user's action to input delivery detail for current cart\n\n Methods\n -------\n options(id=None)\n Return status ok when get hit\n post()\n Input delivery detail for current cart\n \"\"\"\n def options(self, id=None):\n return {\"status\": \"ok\"}, 200\n\n @jwt_required\n @user_required\n def post(self):\n \"\"\"\n Input delivery detail for current cart\n Only user can do this action\n\n JSON Inputs\n -----------\n name : str\n Recipient's name\n phone : str\n Recipient's phone number\n address : str\n Recipient's address\n city : str\n Recipient's city\n province : str\n Recipient's province\n postal_code : str\n Recipient's place postal code\n \"\"\"\n parser = reqparse.RequestParser()\n parser.add_argument(\"name\", location=\"json\", required=True)\n parser.add_argument(\"phone\", location=\"json\", required=True)\n parser.add_argument(\"address\", location=\"json\", required=True)\n parser.add_argument(\"city\", location=\"json\", required=True)\n parser.add_argument(\"province\", location=\"json\", required=True)\n parser.add_argument(\"postal_code\", location=\"json\", required=True)\n args = parser.parse_args()\n claims = get_jwt_claims()\n ### ADD DELIVERY DETAIL TO CART ###\n cart = Carts.query.filter_by(user_id=claims[\"id\"]).filter_by(status=False).first()\n if cart != None:\n cart.name = args[\"name\"]\n cart.phone = args[\"phone\"]\n cart.address = args[\"address\"]\n cart.city = args[\"city\"]\n cart.province = args[\"province\"]\n cart.postal_code = args[\"postal_code\"]\n db.session.commit()\n return {\"status\": \"SUCCESS\"}, 200, {\"Content-Type\": \"application/json\"}\n else:\n return {\"status\": \"EMPTY\"}, 200, {\"Content-Type\": \"application/json\"}\n\nclass PaymentDetailResource(Resource):\n \"\"\"\n A class used to represent user's payment detail for current cart\n This class is also used to contain user's action to confirm payment\n\n Methods\n -------\n options(id=None)\n Return status ok when get hit\n get()\n Show user's payment detail for current cart\n post()\n Confirm payment for current cart\n \"\"\"\n def options(self, id=None):\n return {\"status\": \"ok\"}, 200\n\n @jwt_required\n @user_required\n def get(self):\n \"\"\"\n Show user's payment detail for current cart\n Only user can do this action\n \"\"\"\n claims = get_jwt_claims()\n cart = Carts.query.filter_by(user_id=claims[\"id\"]).filter_by(status=False).first()\n item_list = CartDetails.query.filter_by(cart_id=cart.id)\n result = []\n for item in item_list:\n product = Products.query.get(item.product_id)\n result.append({\n \"product\": product.album,\n \"qty\": item.qty\n })\n if cart != None:\n return {\"product_list\": result, \"total\": cart.total}, 200, {\"Content-Type\": \"application/json\"}\n else:\n return {\"status\": \"NOT FOUND\"}, 404, {\"Content-Type\": \"application/json\"}\n\n @jwt_required\n @user_required\n 
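# Mark the current cart as paid and email a receipt via Mailjet.\n    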
def post(self):\n        \"\"\"\n        Confirm payment for current cart, then send receipt by email to user\n        Only user can do this action\n        \"\"\"\n        claims = get_jwt_claims()\n        cart = Carts.query.filter_by(user_id=claims[\"id\"]).filter_by(status=False).first()\n        if cart != None:\n            if cart.name == None or cart.phone == None or cart.address == None or cart.city == None or cart.province == None or cart.postal_code == None:\n                return {\"status\": \"NOT FOUND\"}, 404, {\"Content-Type\": \"application/json\"}\n            else:\n                cart.status = True\n                db.session.commit()\n                ### USE MAILJET TO SEND EMAIL TO USER ###\n                env = os.environ.get(\"FLASK_ENV\", \"development\")\n                ### SKIP SET API KEY AND SECRET WHEN TESTING ###\n                if env == \"development\":\n                    api_key = os.environ[\"MAILJET_API_KEY\"]\n                    api_secret = os.environ[\"MAILJET_API_SECRET\"]\n                    mailjet = Client(auth=(api_key, api_secret), version=\"v3.1\")\n                item_list = CartDetails.query.filter_by(cart_id=cart.id)\n                html_part = \"<ol>\"\n                for item in item_list:\n                    product = Products.query.get(item.product_id)\n                    item_html = \"<li>\" + product.album + \" (\" + str(item.qty) + \")\" + \"</li>\"\n                    html_part += item_html\n                html_part += \"</ol>\"\n                total = \"Total: \" + str(cart.total) + \" rupiah\"\n                opening = \"Pembayaranmu telah berhasil dikonfirmasi. Berikut detail transaksimu:\"\n                ending = \"<br><br>Terima kasih telah membeli produk kami. Nikmati terus berbagai musik berkualitas bersama MusiKamu.<br><br>- MusiKamu Project -\"\n                data = {\"Messages\": [{\n                    \"From\": {\"Email\": \"musikamuproject@gmail.com\", \"Name\": \"MusiKamu\"},\n                    \"To\": [{\"Email\": claims[\"email\"], \"Name\": claims[\"username\"]}],\n                    \"Subject\": \"Konfirmasi Pembayaran\",\n                    \"TextPart\": \"Pembayaran Anda telah berhasil\",\n                    \"HTMLPart\": opening + html_part + total + ending\n                }]}\n                ### SKIP SEND EMAIL WHEN TESTING ###\n                if env == \"development\":\n                    mailjet.send.create(data=data)\n                return {\"status\": \"SUCCESS\"}, 200, {\"Content-Type\": \"application/json\"}\n        else:\n            return {\"status\": \"EMPTY\"}, 200, {\"Content-Type\": \"application/json\"}\n\napi.add_resource(UserProfileResource, \"/profile\")\napi.add_resource(UserTransactionResource, \"/transaction\")\napi.add_resource(CartResource, \"/cart\")\napi.add_resource(DeleteProductFromCartResource, \"/cart/<id>\")\napi.add_resource(DeliveryDetailResource, \"/send\")\napi.add_resource(PaymentDetailResource, \"/pay\")","sub_path":"blueprints/user/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":13934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"462106900","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import window\nfrom pyspark.sql.types import StructType\n\n\n# Ignoring duplicates\n\n\nif __name__==\"__main__\":\n    spark= SparkSession.builder.appName(\"deduplication\").master(\"local[*]\").getOrCreate()\n    spark.sparkContext.setLogLevel(\"ERROR\")\n\n    # Here we define a guid to uniquely identify a record\n    recordSchema=StructType().add(\"guid\",\"string\").add(\"timestamp\",\"timestamp\").add(\"val\", \"integer\")\n\n    streamDF=spark.readStream.schema(recordSchema).option(\"sep\",\"\\t\").csv(\"../data/streaming-data/event-time-data\")\n\n    print(streamDF.isStreaming)\n\n    streamDF.withWatermark(\"timestamp\",\"5 seconds\")\n    \n    windowDF=streamDF.withWatermark(\"timestamp\", \"1 minutes\").groupBy(\n        window(\"timestamp\",\"5 seconds\")\n    ).count()\n\n    # output data to a file sink. 
Here, we have written to parquet file\n    # Allowed sink formats: parquet, kafka, memory, etc.\n    # query=windowDF.writeStream.format(\"parquet\").option(\"path\",\"../data/streaming-data/file-sink1\").option(\"checkpointLocation\",\"checkpoint-data1\").start()\n\n    query=windowDF \\\n        .writeStream \\\n        .queryName(\"aggregates\") \\\n        .outputMode(\"complete\") \\\n        .format(\"memory\") \\\n        .start()\n\n    spark.sql(\"select * from aggregates\").show()\n    \n    query.awaitTermination()\n\n    spark.stop()\n\n\n    ","sub_path":"streaming_basics/file_sink.py","file_name":"file_sink.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"313841117","text":"from jarray.pytorch import JaggedArrayLinear\nimport torch\n\ndef model(idim):\n    \"Simple PyTorch model for testing purposes\"\n    model = torch.nn.Sequential(\n        JaggedArrayLinear(idim, 5),\n        torch.nn.ReLU(),\n        torch.nn.Linear(5, 1),\n    )\n    return model\n\n\n","sub_path":"src/python/ex_pytorch.py","file_name":"ex_pytorch.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"392239213","text":"# -*- coding: utf-8 -*-\n\nimport dateutil.parser\nimport os\nimport json\nimport click\n\nfrom flask import Flask, send_from_directory, render_template\n\napp = Flask(__name__)\n\napp.config[\"DATA\"] = {}\n\n\n@app.route(\"/assets/<path:file_path>\")\ndef assets(file_path):\n    asset_path = os.path.join(app.static_path, \"assets\")\n    return send_from_directory(asset_path, file_path)\n\n\n@app.route(\"/\")\ndef theme():\n    context = {\"site\": {},\n               \"theme\": {\"social\": {}},\n               \"post\": {},\n               \"pagination\": {}}\n    context.update(**app.config[\"DATA\"])\n    return render_template(\"theme.html\", **context)\n\n\n@app.template_filter(\"format_date\")\ndef _format_date(date):\n    nth = lambda n: str(n) + nth.ext[int(n) % 10]\n    nth.ext = ['th', 'st', 'nd', 'rd'] + ['th'] * 6\n    date = dateutil.parser.parse(date)\n    native = date.replace(tzinfo=None)\n    return \"\".join([native.strftime(\"%B \"),\n                    nth(native.day),\n                    native.strftime(\", %Y\")])\n\n\n@app.template_filter(\"format_tag\")\ndef _format_tag(tag, link=False, humanize=False):\n    return tag.capitalize()\n\n\n@app.context_processor\ndef context_processor():\n    return {\"assets\": lambda asset: \"/assets/{asset}\".format(asset=asset),\n            \"static\": lambda asset: asset}\n\n\n@click.command()\n@click.argument(\"path\", type=click.Path(), default=\".\")\n@click.option(\"--data\", type=click.File(\"r\"), default=None)\ndef cli(path, data):\n    path = os.path.abspath(path)\n    app.config[\"DATA\"] = json.load(data) if data is not None else {}\n    app.template_folder = path\n    app.run(debug=True)\n","sub_path":"postachio.py","file_name":"postachio.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"420537537","text":"import reverse_throughput_process\nimport reverse_windows_scan\nimport asyncio\nimport websockets\n\ntempfile = \"./tempfile\"\n#server_ip = \"202.90.158.168\"\nserver_ip = \"35.185.183.104\"\nhandler = '10005'\nthroughput_port = '10011'\nrecv_window = 1000\nmtu = \"1460\"\nrtt = 16.034\nconnections = \"1\"\nmss = \"1080\"\n\nasync def test():\n    res = await reverse_windows_scan.scan_process(\n        tempfile = tempfile, \n        SERVER_IP = server_ip,\n        handler_port = handler,\n        throughput_port = throughput_port,\n        recv_window = recv_window,\n        mtu = mtu,\n        rtt = rtt,\n        connections = 
connections,\n mss = mss)\n return res\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n group = asyncio.gather(test())\n all_groups = asyncio.gather(group)\n results = loop.run_until_complete(all_groups)\n loop.close()\n print(results)\n","sub_path":"test_rev_thpt.py","file_name":"test_rev_thpt.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"200028879","text":"ACHIEVEMENTS = [\n (\"old\", {\n \"name\": \"Олды здесь\",\n \"description\": \"Старый член Клуба\",\n \"image\": \"/static/images/badges/year-2.png\",\n \"style\": \"color: #FFF; background-color: #65c3ba;\",\n }),\n (\"investor\", {\n \"name\": \"Инвестор\",\n \"description\": \"\",\n \"image\": \"\",\n \"style\": \"\",\n }),\n (\"contributor\", {\n \"name\": \"Контрибьютор\",\n \"description\": \"Выдаётся за заметный вклад в код Вастрик.Клуба на гитхабе\",\n \"image\": \"\",\n \"style\": \"\",\n }),\n (\"moderator\", {\n \"name\": \"Модератор\",\n \"description\": \"\",\n \"image\": \"\",\n \"style\": \"\",\n }),\n]\n","sub_path":"common/data/achievements.py","file_name":"achievements.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"370628556","text":"from django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom html2text import HTML2Text\n\n\nINVITATION_TEMPLATE = 'maps/email/traveler_invitation_template.html'\nINVITATION_SUBJECT = 'maps/email/traveler_invitation_subject.txt'\nTRIP_REQUEST_TEMPLATE = 'maps/email/trip_request_template.html'\nTRIP_REQUEST_SUBJECT = 'maps/email/trip_request_subject.txt'\nTRIP_REQUEST_APPROVAL_TEMPLATE = 'maps/email/trip_request_approval_template.html'\nTRIP_REQUEST_APPROVAL_SUBJECT = 'maps/email/trip_request_approval_subject.txt'\nTRIP_REQUEST_DENIAL_TEMPLATE = 'maps/email/trip_request_denial_template.html'\nTRIP_REQUEST_DENIAL_SUBJECT = 'maps/email/trip_request_denial_subject.txt'\n\n\ndef _html2text(html):\n \"\"\"Convert HTML string to plaintext (Markdown).\"\"\"\n parser = HTML2Text()\n parser.ignore_emphasis = True\n parser.ignore_images = True\n parser.ignore_links = False\n return parser.handle(html).strip()\n\n\ndef send_invitation_email(traveler):\n \"\"\"\n Send an invitation email with the code that users will use to\n login and register for a trip section.\n \"\"\"\n subject_context = {\n 'full_name': traveler.full_name,\n }\n body_context = {\n 'traveler': traveler,\n }\n subject = render_to_string(INVITATION_SUBJECT, subject_context)\n html_message = render_to_string(INVITATION_TEMPLATE, body_context)\n message = _html2text(html_message)\n recipient = traveler.email\n sender = settings.DEFAULT_FROM_EMAIL\n\n send_mail(\n subject=subject,\n message=message,\n from_email=sender,\n recipient_list=[recipient],\n html_message=html_message,\n )\n\n\ndef send_trip_request_email(trip_section_request):\n \"\"\"\n Send a request to join a trip section. This will alert the\n admins when somebody requests to join a trip section. 
This\n    can be configured via settings.\n    \"\"\"\n    traveler = trip_section_request.traveler\n\n    subject_context = {\n        'trip_section_request': trip_section_request,\n        'full_name': traveler.full_name,\n    }\n    body_context = {\n        'trip_section_request': trip_section_request,\n        'traveler': traveler,\n    }\n    subject = render_to_string(TRIP_REQUEST_SUBJECT, subject_context)\n    html_message = render_to_string(TRIP_REQUEST_TEMPLATE, body_context)\n    message = _html2text(html_message)\n    recipient = traveler.email\n    sender = settings.DEFAULT_FROM_EMAIL\n\n    send_mail(\n        subject=subject,\n        message=message,\n        from_email=sender,\n        recipient_list=[recipient],\n        html_message=html_message,\n    )\n\n\n\ndef send_trip_request_approval_email(trip_section_request):\n    \"\"\"\n    Send an approval for a requested trip section.\n    \"\"\"\n    traveler = trip_section_request.traveler\n\n    subject_context = {\n        'trip_section_request': trip_section_request,\n        'full_name': traveler.full_name,\n    }\n    body_context = {\n        'trip_section_request': trip_section_request,\n        'traveler': traveler,\n    }\n    subject = render_to_string(TRIP_REQUEST_APPROVAL_SUBJECT, subject_context)\n    html_message = render_to_string(TRIP_REQUEST_APPROVAL_TEMPLATE, body_context)\n    message = _html2text(html_message)\n    recipient = traveler.email\n    sender = settings.DEFAULT_FROM_EMAIL\n\n    send_mail(\n        subject=subject,\n        message=message,\n        from_email=sender,\n        recipient_list=[recipient],\n        html_message=html_message,\n    )\n\n\ndef send_trip_request_denial_email(trip_section_request):\n    \"\"\"\n    Send a denial for a requested trip section.\n    \"\"\"\n    traveler = trip_section_request.traveler\n\n    subject_context = {\n        'trip_section_request': trip_section_request,\n        'full_name': traveler.full_name,\n    }\n    body_context = {\n        'trip_section_request': trip_section_request,\n        'traveler': traveler,\n    }\n    subject = render_to_string(TRIP_REQUEST_DENIAL_SUBJECT, subject_context)\n    html_message = render_to_string(TRIP_REQUEST_DENIAL_TEMPLATE, body_context)\n    message = _html2text(html_message)\n    recipient = traveler.email\n    sender = settings.DEFAULT_FROM_EMAIL\n\n    send_mail(\n        subject=subject,\n        message=message,\n        from_email=sender,\n        recipient_list=[recipient],\n        html_message=html_message,\n    )\n","sub_path":"maps/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"335831610","text":"import openpyxl as OPX\nimport xlrd\nfrom collections import OrderedDict\nimport CoolProp.CoolProp as CP\nimport math\nimport os\nimport time\nimport pandas as pd\nimport IPython.display as ip\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.style.use('ggplot')\n\ndef ReadDataCols(mydatapath,myfilename):\n\n    wb = xlrd.open_workbook(mydatapath + '/' + myfilename) #open workbook, .xlsx\n    sheet_names = wb.sheet_names() #get all sheet names\n    for item in sheet_names: #go through each sheet in the workbook \n        if \"raw\" in item: continue #ignore 'raw data' sheets\n        datasheet = wb.sheet_by_name(item) #after ignoring the 'raw data'\n        #assign the datasheet variable as the next worksheet name\n        ##print('sheet name ', item) \n        rowx = datasheet.nrows #get number of used rows\n        colx = datasheet.ncols #get number of used columns\n        ##print('row count ', rowx)\n        ##print('column count ', colx)\n\n    DataCols = {} #initialize the DataCols dictionary\n    for i in range(0, rowx): #go through all rows, row index starts from 0\n        if bool(DataCols): break #if DataCols already has values, stop the loop;\n        #once it holds all data with proper headers, keeping the loop\n        #running would overwrite the dictionary\n        \n        for j in range(0, colx): #go through all columns\n            x = datasheet.cell(i, j).value\n            checklength = len(str(x))\n            if checklength == 0: continue #if length of current cell\n            #is 0, means empty cell, go to next column\n            if not type(x) is float: #if cell value is not a number\n                #it is a text header\n                DataCols[x] = datasheet.col_values(j, i + 1, rowx)\n                #use the cell value as key, assign the rest of the\n                #column as its value\n                \n                #print('header ',x)\n\n    if min(DataCols['T_RSHO']) < min(DataCols['T_riR'])+3 :\n        print('Problem: Superheater Saturated')\n        return False\n    #if the superheater outlet temp, T_RSHO, gets below T_riR+3 K,\n    #return False to skip this data file\n    \n    if min(DataCols['T_roR']) > min(DataCols['T_riR'])+3 :\n        print('Problem: HX Outlet Superheated')\n        return False\n    #if T_roR is larger than T_riR+3 K, the outlet has reached the\n    #superheated gas phase; return False to skip this data file\n    \n    return DataCols\n\n    #returns a dictionary w/ key-header, item-data\n\ndef AverageDataCols(DataCols):\n    AvgDataCols = {} #initialize the dictionary\n    \n    for key, value in DataCols.items():\n        #print(key)\n        check = False\n        for x in value:\n            if not type(x) is float: #if one of the values is not a number\n                check = True #set check as true\n                AvgDataCols[key] = x #assign the non-numeric value\n                #to the current key in the new dictionary\n                #this is to avoid averaging on text values in the next\n                #steps\n                #print(x)\n                break #break out to the outer for loop\n        if check:continue #if check is true, skip the rest and go to next\n        #iteration\n        AvgDataCols[key] = float(sum(value)/len(value))\n        # \n\n    DataCols.clear() #destroy the raw data dictionary\n    return AvgDataCols\n\ndef CtoK(x):\n    x = x + 273.15\n    return x\n\n\nclass Date_Period(object):\n\n    def __init__(self, myfilename):\n        check = False\n        period = 0\n        for char in myfilename:\n\n            if char == '=': #if a letter is an '=' sign\n                check = True #set the flag to True\n                continue #go to next letter\n\n            if not check: continue\n            #if flag is False, means right now we are still before the '=' sign\n\n            if char == 'i':\n                #if the next char is 'i', that means it's infinite period\n                #in this case the period is denoted as 0\n                ##print(period)\n                break\n\n            if char == 's':\n                #if the next char is 's', that is the end of the period\n                ##print(period)\n                break\n\n            #if not float(char): continue #this doesn't work when char\n            #is zero\n\n            period = period * 10 + float(char)\n\n        date = \"\"\n        for i in range(8):\n            date += myfilename[i] #the first 8 characters define the date\n        \n        self.period = period\n        self.date = date\n        ##print('date = ',date)\n        \n\nclass AvgVars(object):\n\n    def __init__(self, mydict):\n\n        self.m_dot_r = mydict['m_ri']/1000\n        self.T_ri = CtoK(mydict['T_ri'])\n        self.P_ri = mydict['P_eri']*1000\n        self.T_pho = CtoK(mydict['T_PHO'])\n        self.P_ai = mydict['P_atm'] * 1000\n        self.Q_preheating = mydict['q_ri']\n        self.m_dot_ro = mydict['m_ro'] / 1000\n        self.T_aiR = CtoK(mydict['T_aiR'])\n        self.T_ref = CtoK(mydict['T_ref'])\n        self.T_anoR = CtoK(mydict['T_nR'])\n        self.DP_anR = mydict['DP_naR']\n        self.T_aoaveR = -1\n        self.T_riR = CtoK(mydict['T_riR'])\n        self.T_roR = CtoK(mydict['T_roR'])\n        self.T_shR = CtoK(mydict['T_RSHO'])\n        self.P_shR = mydict['P_eroR'] * 1000\n        self.DP_rR = mydict['DP_erR'] * 1000\n        self.Q_superheatingR = mydict['q_roR']\n        self.Q_superheatingRx = self.Q_superheatingR\n        
self.T_aiL = CtoK(mydict['T_aiL'])\n        self.T_anoL = CtoK(mydict['T_nL'])\n        self.DP_anL = mydict['DP_naL']\n        self.T_aoaveL = -1\n        self.T_riL = CtoK(mydict['T_riL'])\n        self.T_roL = CtoK(mydict['T_roL'])\n        self.T_shL = CtoK(mydict['T_LSHO'])\n        self.P_shL = mydict['P_eroL'] * 1000\n        self.Q_superheatingL = mydict['q_roL']\n\n\nclass FlowChars(object):\n    _registry = []\n\n    def __init__(self, avgdata, constants, airmassflowrate):\n\n        self._registry.append(self)\n\n        T_aiR = avgdata.T_aiR\n        T_anoR = avgdata.T_anoR\n        T_airAvg = (T_aiR + T_anoR)/2\n        #print('T_airAvg = ',T_airAvg)\n        P_airAvg = avgdata.P_ai - avgdata.DP_anR/2\n        cp_aRn = CP.PropsSI('C','P',P_airAvg,'T',T_airAvg,'Air')\n\n        Q_aRn = airmassflowrate.m_dot_aR * cp_aRn * (T_aiR - T_anoR)\n\n        G = avgdata.m_dot_r / constants.A_sec_r\n\n        T_ri = avgdata.T_ri\n        P_ri = avgdata.P_ri\n        h_ri = CP.PropsSI('H','P',P_ri,'T',T_ri ,'R134a')\n\n        T_shR = avgdata.T_shR\n        P_shR = avgdata.P_shR\n        h_roR = CP.PropsSI('H','P',P_shR,'T',T_shR ,'R134a')\n\n        DP_anR = avgdata.DP_anR\n        DP_anL = avgdata.DP_anL\n        k = 1\n        if (DP_anL > DP_anR * 0.3): k = 0.5\n\n        self.Q_aRn = Q_aRn\n        self.G = G\n        G_nominal = 25*round(G*k/25)\n        self.G_nom = G_nominal\n        self.h_ri = h_ri\n        self.cp_aRn = cp_aRn\n        self.h_roR = h_roR\n        self.k = k\n\n\nclass Constants(object):\n    def __init__(self, tubenumber, tubelength):\n        \n        self.tubenum = tubenumber\n        self.tubelength = tubelength\n        self.exposefactor = self.tubenum/24 * self.tubelength/385\n        self.k_fin = 170 #W/m-K\n        self.t_fin = 0.13 * 1e-3 #m\n        self.s_t = 25.4 * 1e-3 #m\n        self.s_l = 16.67 * 0.5 * 1e-3 #m\n        self.s = 385/235 * 1e-3 #m\n        #pitch by a fin thickness\n        self.R_outer = 8.5/2 * 1e-3 #m\n        self.X_M = self.s_t / 2\n        self.X_L = (self.X_M**2 + self.s_l**2)**0.5/2\n        self.R_ratio = 1.27*self.X_M/self.R_outer * (self.X_L/self.X_M-0.3)**0.5\n        self.phi = (self.R_ratio-1)*(1+0.35*math.log(self.R_ratio))\n        self.L_fin = self.R_outer * self.phi\n        self.A_a_fin = 4.23 * self.exposefactor #{in m^2}\n        self.A_a_base = 0.2466 * self.exposefactor #{in m^2}\n        self.A_a_total = self.A_a_fin + self.A_a_base #{in m^2}\n        self.A_a_ratio = self.A_a_fin / self.A_a_total\n        #\"end of fin and air side geometry\"\n        self.T_wb_ai = 13 #{in C}\n        self.D_r = 0.0062 #{in m}\n        self.R_MR = 8.3\n        self.R_ML = 8.5\n        self.R_MP = 8.7 #{in ohm, for resistance}\n        self.Refrigerant ='R134a'\n        self.A_sec_r = math.pi/4*self.D_r**2 #{in m^2}\n        self.A_all = math.pi*self.D_r*self.tubenum*0.385 * (self.tubelength/385)\n        #{the actual number of tubes that participate in\n        #heat transfer}\n\nclass AirMassRate(object):\n    def __init__(self,constant,avgdata):\n\n        D_no=4*0.0254 #{in m}\"nozzle outlet diameter\"\n        D_ni=0.305 #{in m}\"nozzle inlet diameter\"\n        A_ni=math.pi*D_ni**2/4 #{in m^2}\"nozzle inlet area\"\n        A_no=math.pi*D_no**2/4 #{in m^2}\"nozzle outlet area\"\n        D_a=0.3 * constant.tubenum/24\n        #{in m}\"wind tunnel cross section a\"\n        D_b=0.385 * constant.tubelength/385\n        #{in m}\"wind tunnel cross section b\"\n        D_h=2*D_a*D_b/(D_a+D_b)\n        #\"hydraulic diameter of the wind tunnel\"\n\n        # \"Air density calculation\"\n        R_airconstant=287.1 #{in J/kg.K} \"gas constant of air\"\n        P_wsvp=3.25*10**(-3)*(constant.T_wb_ai)**2-0.0186*(constant.T_wb_ai)+0.692\n        #{in kPa}\"the saturated vapor pressure for the wet-bulb temperature at evaporator inlet\"\n        P_p=P_wsvp-avgdata.P_ai*((avgdata.T_aiR - constant.T_wb_ai - 273.15)/1500)\n        # {in kPa}\"the partial water vapor pressure at the evaporator inlet\"\n        rho_ai=(avgdata.P_ai-0.378*P_p)/(R_airconstant*(avgdata.T_aiR ))\n        # {in kg/m3}\"air density at evaporator inlet\"\n\n        
P_ani=avgdata.P_ai\n        # {in kPa} \"nozzle inlet pressure; the pressure drop through the evaporator is omitted here.\"\n        P_ano=P_ani-avgdata.DP_anR/1000\n        #{in kPa} \"nozzle outlet pressure\"\n        rho_ani=(rho_ai*P_ano*(avgdata.T_aiR ))/(avgdata.P_ai*(avgdata.T_anoR ))\n        #{kg/m3}\"nozzle inlet air density\"\n        rho_ano=(rho_ai*P_ano*(avgdata.T_aiR ))/(avgdata.P_ai*(avgdata.T_anoR))\n        #{kg/m3}\"nozzle outlet air density\"\n\n        # \"air flow rate calculation\"\n        alpha=1-(avgdata.DP_anR/(rho_ani*R_airconstant*(avgdata.T_anoR)))\n        # \"alpha ratio, absolute nozzle exit pressure to absolute approach pressure\" \"The approach pressure means inlet pressure.\"\n        beta=D_no/D_h #\"beta ratio, nozzle exit diameter to approach duct diameter\"\n        Y=1-(0.548+0.71*beta**4)*(1-alpha) #\"expansion factor\"\n        C_guess=0.996 #\"C_guess still needs to be improved\"\n        C=C_guess\n        U_ani=C*A_no*Y * (2*avgdata.DP_anR/rho_ani)**0.5\n        #{in m3/s}\"volume flow rate at nozzle inlet\"\n        #\"C is the discharge coefficient\"\n        self.m_dot_aR = U_ani*rho_ani #{in kg/s}\n        #\"mass flow rate at anywhere along the chamber\"\n        self.v_ai=self.m_dot_aR/rho_ani/(D_a*D_b/2) #{in m/s}\n        #\"air velocity anywhere along the chamber\"\n\n        #v_ano=U_ani*(rho_ani/rho_ano)/A_no #{in m/s}\"air velocity at nozzle outlet\"\n        #mu_ano = (17.23+0.048*avgdata.T_ano)*10^(-6) # {N.s/m^2}\"Air viscosity at nozzle outlet\"\n        #Re=(rho_ano*v_ano*D_no)/mu_ano #\"Reynolds number at nozzle outlet\"\n        #Re_cor=70900*D_no*(avgdata.DP_an*rho_ani/(1-beta^4))^0.5 #\"Reynolds number at nozzle outlet, correlation from the Standard\"\n\n\nclass AirHTC(object):\n    def __init__(self,constant,avgdata,airmassflowrate):\n\n        cp_a = CP.PropsSI('C','P',avgdata.P_ai,'T',avgdata.T_aiR ,'Air')\n        mu_a = CP.PropsSI('V','P',avgdata.P_ai,'T',avgdata.T_aiR ,'Air')\n        k_a = CP.PropsSI('L','P',avgdata.P_ai,'T',avgdata.T_aiR ,'Air')\n        Pr_a = mu_a*cp_a/k_a\n        D_h_a = 0.003623\n        A_fr = 0.385*0.3*(constant.exposefactor)\n        rownumber = 2\n        s_t = 25.4/1000 #{in m}\n        s_l = 16.67/2/1000 #{in m}\n        s = (constant.tubelength/235 - constant.t_fin)/1000\n        G_a = airmassflowrate.m_dot_aR/A_fr\n        Re_a = G_a * D_h_a / mu_a\n        #{An EES compact heat exchanger 'fc_tubes_s80-38T' model is used here, which is not the same as our test heat exchanger}\n        #{Correlations for air-side plain fin-and-tube heat exchangers with staggered tube arrangements of Kim-Youn-Webb,1999}\n        j_H_a_r3 = 0.163* Re_a**(-0.369) * (s_t/s_l)**0.106 * (s/D_h_a)**0.0138 * (s_t/D_h_a)**0.13\n        Ratio=1.043 * ( Re_a**(-0.14) * (s_t/s_l)**(-0.564) * (s/D_h_a)**(-0.123) * (s_t/D_h_a)**1.17 )**(3-rownumber)\n        j_H_a = j_H_a_r3 * Ratio\n        St = j_H_a * Pr_a**(-2/3)\n        #\"Stanton number\"\n        htc_a = St * G_a * cp_a\n\n        #\"Fin: rectangular\"\n        b = constant.t_fin #{m} #{fin thickness}\n        m = (2* htc_a /(constant.k_fin * b))**0.5\n        W = 15 * 1e-3 / 2 #{m}\n        L = 24 * 1e-3 / 2 #{m}\n        ri = 8.5 * 1e-3 / 2 #{m}\n        pse = W/ri\n        beta = L/W\n        ro = 1.28 * pse * (beta - 0.2)**0.5 * ri\n        phi = (ro/ri -1) * (1 + 0.35 * math.log(ro/ri))\n        eta_surface = math.tanh(m*ri*phi) / (m*ri*phi)\n\n        self.eta_surface = eta_surface\n        self.htc_a = htc_a\n\n\nclass QDot(object):\n    def __init__(self,constant,avgdata,airmassflowrate,flowchars):\n\n\n        if avgdata.Q_superheatingR < 0.1:\n            Q_rRM = flowchars.Q_aRn\n            #because the superheater is off, we only use the air-side rate of heat transfer\n            Q_Rn = flowchars.Q_aRn\n        else:\n            Q_rRM = flowchars.k * avgdata.m_dot_r * (flowchars.h_roR - flowchars.h_ri) - avgdata.Q_preheating * flowchars.k - avgdata.Q_superheatingR\n##            Q_Rn = (Q_rRM + flowchars.Q_aRn) / 2\n            Q_Rn 
\n
        diat1R = (flowchars.Q_aRn-Q_rRM)/Q_rRM\n
        #\"energy balance using nozzle outlet temperature\"\n
        diat2R = (flowchars.Q_aRn-Q_Rn)/Q_Rn\n
        #\"energy balance with measured voltage and evaporator outlet average temperature\"\n
        T_sat_1 = CP.PropsSI('T','P',avgdata.P_ri,'Q',0.5,'R134a')\n
        #\"see whether the temperature before preheating is saturated or not\"\n
        T_sat_2 = CP.PropsSI('T','P',avgdata.P_ri - 3400,'Q',0.5,'R134a')\n
        #\"see whether the inlet of the evaporator is saturated or not\"\n
        T_sat_3 = CP.PropsSI('T','P',avgdata.P_shR,'Q',0.5,'R134a')\n
        #\"see whether the outlet of the evaporator is saturated or not\"\n\n
        if avgdata.Q_preheating < 0.1:#\"no pre-heating, the evaporator inlet can be 3 kelvin subcooled or saturated liquid\"\n
            x_reiRM = 0 #\"set as saturated liquid\"\n
            EC1 = math.fabs(avgdata.T_riR - avgdata.T_ri)\n
        else:#\"with the preheater on\"\n
            EC1 = math.fabs(avgdata.T_riR - T_sat_2)\n\n
        h_riR = flowchars.h_ri + avgdata.Q_preheating / avgdata.m_dot_r\n\n
        if EC1 < 3:#\"if the temperature difference at the inlet is smaller than 3 K, consider it two-phase\"\n
            h_ri_fluid = CP.PropsSI('H','T',avgdata.T_riR ,'Q',0,'R134a')\n
            h_ri_gas = CP.PropsSI('H','T',avgdata.T_riR ,'Q',1,'R134a')\n
            x_reiRM = (h_riR - h_ri_fluid)/(h_ri_gas - h_ri_fluid)\n
        else:#\"if the temperature difference is larger than 3 K, the inlet flow is still subcooled liquid\"\n
            x_reiRM = 0\n\n
        h_roR = h_riR + Q_Rn / (avgdata.m_dot_r * flowchars.k)\n
        h_ro_fluid = CP.PropsSI('H','T',avgdata.T_roR ,'Q',0,'R134a')\n
        h_ro_gas = CP.PropsSI('H','T',avgdata.T_roR ,'Q',1,'R134a')\n
        x_reoRM = (h_roR - h_ro_fluid)/(h_ro_gas - h_ro_fluid)\n\n
        x_reiRM_nom = round(x_reiRM,1)\n
        x_reoRM_nom = round(x_reoRM,1)\n\n
        self.Q_rRM = Q_rRM\n
        self.Q_Rn = Q_Rn\n
        self.x_reiRM = x_reiRM\n
        self.x_reoRM = x_reoRM\n
        self.x_reiRM_nom = x_reiRM_nom\n
        self.x_reoRM_nom = x_reoRM_nom\n
        self.diat1R = diat1R\n
        self.diat2R = diat2R\n\n
class UALMTD(object):\n
    def __init__(self,constant,avgdata,airmassflowrate,flowchars,qdot):\n\n
        P_roR = avgdata.P_ri - 3400 - avgdata.DP_rR\n
        #\"evaporator outlet pressure, because the temperature is the saturation temperature\"\n
        #T_ro_sat = CP.PropsSI('T','P',P_roR ,'Q',0.5,'R134a')\n
        T_ro_sat = avgdata.T_roR\n
        ##print('T_ro_sat = ', T_ro_sat)\n
        #\"set the evaporator outlet temperature the same as the saturation temperature, for calculation purposes\"\n
        #T_ri_sat = CP.PropsSI('T','P',P_roR + avgdata.DP_rR ,'Q',0.5,'R134a')\n
        T_ri_sat = avgdata.T_riR\n
        ##print('T_ri_sat = ', T_ri_sat)\n
        T_r1 = (T_ri_sat+T_ro_sat)/2\n
        DT1 = avgdata.T_aiR - T_r1\n
        DT2 = avgdata.T_anoR - T_r1\n
        MTD = (DT1 - DT2)/math.log(DT1/DT2)\n
        UA_LMTD = qdot.Q_Rn /MTD\n
        self.MTD = MTD\n
        self.UA_LMTD = UA_LMTD\n
        self.UA_NTU = -1\n
        self.T_ri_sat = T_ri_sat\n
        self.T_ro_sat = T_ro_sat\n\n
class HTCRFG(object):\n
    def __init__(self,constant,avgdata,airhtc,airmassflowrate,flowchars,qdot,ualmtd):\n
        Rair = 1/(airhtc.htc_a * constant.A_a_total )\n
        hc = 95*1e3 #contact conductance, W/m2-K\n
        Ao = constant.A_all * 8.5/6.2\n
        #8.5 mm is the outer diameter, 6.2 mm is the inner diameter\n
        htc_rNTU = -1\n
        htc_rLMTD = 1/constant.A_all * (1/ualmtd.UA_LMTD * airhtc.eta_surface - Rair - 1/(hc*Ao))**(-1)\n
        self.htc_rNTU = htc_rNTU\n
        self.htc_rLMTD = htc_rLMTD\n
        ##print('htc_rLMTD=',htc_rLMTD)\n\n
\n##def headerlist(ws,myclasslist):\n
##    row = 1\n
##    col = 1\n
##    keylist = []\n
##    for myclass in myclasslist:\n
##        mydict = myclass.__dict__\n
##        #mydict is the dictionary of the class attributes that contains\n
##        #calculation results\n
##        #__dict__ is a built-in attribute of the class\n
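##        #so mydict lists every result attribute computed for that class instance\n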
##        for key,value in mydict.items():\n
##            keylist.append(key) \n
##\n
##    keylist.sort()\n
##    \n
##    for key in keylist:\n
##        ws.cell(row=row,column=col).value = key\n
##        col += 1\n
##        \n
##    return keylist\n\n
class dfgroup(object):\n
    def __init__(self):\n
        self.gpnum = 1 #data group number that will go into the data frame\n
        self.parenthtc = 0 #the htc of the zero period in the group\n
        self.df = pd.DataFrame()\n
        self.df_sub = pd.DataFrame()\n
        self.dflist = [] #data frame list for concatenation\n
        self.keylist = [] #key list for concatenation\n
        self.cctdf = pd.DataFrame()\n\n
\ndef grouper(myclasslist,gp):\n
#    obtain values from the class gp\n
    gpnum = gp.gpnum\n
    parenthtc = gp.parenthtc\n
    df = gp.df\n
    df_sub = pd.DataFrame()\n
    dflist = gp.dflist\n
    keylist = gp.keylist\n\n
    for myclass in myclasslist: #for each class in the list of classes\n
        mydict = myclass.__dict__ #get the dictionary of class attributes\n
        odd = OrderedDict(sorted(mydict.items(), key=lambda t:t[0]))\n
        #use an OrderedDict to build an ordered dictionary instead of\n
        #looking up with keys\n
        df_sub2 = pd.DataFrame([list(odd.values())],columns=list(odd.keys()))\n
        #assign keys as headers and values as values into a fresh dataframe\n
        df_sub = df_sub.join(df_sub2, how='outer')\n
        #join df_sub2's columns onto df_sub (outer join on the index)\n\n
    if len(df.tail(1)) == 0:\n
        parenthtc = df_sub['htc_rLMTD']\n
##        df_sub['gp_id'] = gpnum\n\n
    elif df_sub['period'].tolist() == [0]:\n
        #if the last row in the dataframe is not empty and\n
        #if the current period is zero,\n
        #add the current dataframe to the dataframe dictionary before\n
        #appending the current row\n
        dflist.append(df)\n
        keylist.append(str(gpnum))\n\n
        gpnum += 1\n
        df = pd.DataFrame()\n
        parenthtc = df_sub['htc_rLMTD']\n\n
    df_sub['EnhanceRatio'] = df_sub['htc_rLMTD']/parenthtc\n
    df = pd.concat([df, df_sub], ignore_index=True) #append df_sub to df\n\n
    #assign results to the class gp\n
    gp.gpnum = gpnum #data group number that will go into the data frame\n
    gp.parenthtc = parenthtc #the htc of the zero period in the group\n
    gp.df = df\n
    gp.df_sub = df_sub\n
    gp.keylist = keylist\n
    return gp\n\n
\ndef ProcessAll(filelist):\n
    myclasslist = [] #initialize myclasslist\n
    gp = dfgroup()\n
    last = len(filelist)-1\n    \n
    for i, file in enumerate(filelist):\n
        print('Processing ',file)\n
        if i == last and len(gp.df)!=0:\n
            if ('fast' in file.lower()) or ('dp' in file.lower()) or (not '=' in file) or ('sync' in file.lower()):\n
                #if this is the last normal data file in the list\n
                gp.dflist.append(gp.df)\n
                gp.keylist.append(str(gp.gpnum))\n\n
        if 'fast' in file.lower():\n
            continue #if the word fast is in the filename, it means\n
        #it's not a normal data file\n
        #if 'fast' in file: continue #filename screening\n
        if 'dp' in file.lower():\n
            continue\n
        if not '=' in file:\n
            continue #if there is no = sign, it is not a normal data file\n
        if 'sync' in file.lower():\n
            continue\n\n
        date_period = Date_Period(file)\n
        rawdatadict = ReadDataCols(datapath,file)\n        \n
        if type(rawdatadict) == bool:\n
            print('Skipped ',file)\n
            continue\n
        #if the type of rawdatadict is a boolean,\n
        #there is a problem with the data, so skip this data file\n        \n
        avgdatadict = AverageDataCols(rawdatadict) \n
        avgdata = AvgVars(avgdatadict)\n
        if (avgdata.Q_preheating < 1) or (avgdata.Q_superheatingR < 1):\n
            continue\n
        #if the preheater power is extremely low, ignore\n
        #if the superheater power is extremely low, ignore\n
        tubenumber = 8\n
        tubelength = 385\n
        constant = Constants(tubenumber,tubelength)\n
        airmassflowrate = AirMassRate(constant, avgdata)\n
        airhtc = AirHTC(constant,avgdata,airmassflowrate)\n
        flowchars = FlowChars(avgdata, constant, airmassflowrate)\n
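        #derive the heat transfer rates, vapor qualities, LMTD, and the\n
        #refrigerant-side HTC for this data file\n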
        qdot = QDot(constant,avgdata,airmassflowrate,flowchars)\n\n
        if qdot.x_reiRM < 0.05:\n
            continue\n
        #if the inlet quality is smaller than 0.05, ignore\n        \n
        ualmtd = UALMTD(constant,avgdata,airmassflowrate,flowchars,qdot)\n
        htcrfg = HTCRFG(constant,avgdata,airhtc,airmassflowrate,flowchars,qdot,ualmtd)\n\n
        #create a list of wanted classes\n
        myclasslist = [date_period,avgdata,constant,airmassflowrate,airhtc,\n
                       flowchars,qdot,ualmtd,htcrfg]\n
        gp = grouper(myclasslist,gp)\n
        print('Processed')\n        \n
##    print(gp.keylist)\n
    gp.cctdf = pd.concat(gp.dflist, keys=gp.keylist)\n\n
    return gp\n\n
def plot(gp):\n
    cctdf = gp.cctdf\n
    keylist = gp.keylist\n
    ##print(keylist)\n
    marker = itertools.cycle((',', '+', '.', 'o', '*','v','^','<','>','8', 's', 'p', 'h', 'H', 'D', 'd'))\n\n
    j = 0\n
    fldict = {}\n    \n
    for i in range(len(keylist)):\n
        key = keylist[i]\n
        df = cctdf.loc[key]\n
        x = df.period\n        \n
        if len(x) <= 1:\n
##            print(x)\n
            continue\n        \n
        #df.loc[key] accesses an element in the concatenated dataframe by key\n
##        y = df.EnhanceRatio\n
        y = df.htc_rLMTD\n
        dTsh = df.T_aiR[0] - df.T_riR[0]\n
        #amount of superheat, T_aiR - T_riR\n
        l = '{0} {1} {2}'.format(df.date[0], round(dTsh,1), round(df.x_reoRM[0],2))\n
        #l is the label for this x-y set, outlet quality of cont. flow\n\n
        flid = 'G{0} x{1}'.format(df.G_nom[0],df.x_reiRM_nom[0])\n
        print(flid)\n
        #flid is the Flow ID\n
        if flid not in fldict:\n
            j += 1\n
            fldict[flid] = j\n
            plt.figure(j)\n
            plt.title(flid)\n\n
        fgid = fldict[flid]\n        \n\n
        plt.figure(fgid)\n
        plt.scatter(x,y,s=60,marker=next(marker),label=l)\n\n
        legend = plt.legend(loc='upper right',shadow = True)\n
        frame = legend.get_frame()\n
        frame.set_facecolor('0.90')\n\n
        # Set the fontsize\n
        for label in legend.get_texts():\n
            label.set_fontsize(14)\n\n
        for label in legend.get_lines():\n
            label.set_linewidth(1.5)  # the legend line width\n\n
    plt.show() #show the plot\n\n    \n
def find_xlsx_filenames( path_to_dir, suffix=\".xlsx\" ):\n    \n
    filenames = os.listdir(path_to_dir)\n
    return [ filename for filename in filenames if filename.endswith( suffix ) ]\n\n
################################main###################################\n\n
t0 = time.perf_counter()\n\n
mainpath = 'D:/Dropbox/Public/UIUC/Research/Data/Normal/Comparable Data Sets'\n
datapath = mainpath + '/Data_xlsx'\n
outputpath = mainpath + '/Output and Summary'\n\n
filelist = find_xlsx_filenames(datapath)\n
gp = ProcessAll(filelist)\n
writer = pd.ExcelWriter(outputpath + '/Output Summary.xlsx')\n
gp.cctdf.to_excel(writer,'Sheet1')\n
writer.save()\n
plot(gp)\n\n
print('Processing Time = ', time.perf_counter() - t0)\n
","sub_path":"Reserve/Pulsating flow results - obsolete.py","file_name":"Pulsating flow results - obsolete.py","file_ext":"py","file_size_in_byte":24554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"48120486","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n
# --------------------------------------------------------------------------------------------\n
from colorama import Fore\n
import os\n
import json\n\n
\ndef read_int(default_value=0):\n
    ret = input()\n
    if ret == '' or ret is None:\n
        return default_value\n
    while not ret.isnumeric():\n
        ret = input(\"Please input a legal number: \")\n
        if ret == '' or ret is None:\n
            return default_value\n
    return int(ret)\n\n
\ndef get_yes_or_no_option(option_description):\n
    print(Fore.LIGHTBLUE_EX + ' ? ' + Fore.RESET + option_description, end='')\n
    option = input()\n
    yes_options = [\"y\", \"yes\", \"Y\", \"Yes\", \"YES\"]\n
    no_options = [\"n\", \"no\", \"N\", \"No\", \"NO\"]\n
    while (option not in yes_options) and (option not in no_options):\n
        option = input(\"This option can only be Yes or No, please input again: \")\n
    return option in yes_options\n\n
\ndef get_int_option(option_description, min_option, max_option, default_option):\n
    print(Fore.LIGHTBLUE_EX + ' ? ' + Fore.RESET + option_description, end='')\n
    option = read_int(default_option)\n
    while option < min_option or option > max_option:\n
        print(\"The range of options is {}-{}, please input again: \".format(min_option, max_option), end='')\n
        option = read_int(default_option)\n
    return option\n\n
\ndef get_command_list(cmd, num=2):\n
    '''Get the last executed commands from local log files'''\n
    history_file_name = os.path.join(cmd.cli_ctx.config.config_dir, 'recommendation', 'cmd_history.log')\n
    if not os.path.exists(history_file_name):\n
        return _get_command_list_from_core(cmd, num)\n
    with open(history_file_name, \"r\") as f:\n
        lines = f.read().splitlines()\n
    lines = [x for x in lines if x != 'next']\n
    if lines:\n
        return lines[-num:]\n\n
    # If the historical execution record is not found in the file recorded by \"az next\",\n
    # it may be the first time that \"az next\" is installed.\n
    # At this time, we will take the history recorded in the files under the commands directory\n
    return _get_command_list_from_core(cmd, num)\n\n
\ndef _get_command_list_from_core(cmd, num=2):\n
    commands_history_dir = os.path.join(cmd.cli_ctx.config.config_dir, 'commands')\n
    if not os.path.isdir(commands_history_dir):\n
        return []\n\n
    command_file_list = os.listdir(commands_history_dir)\n
    command_file_list.sort(key=lambda fn: fn, reverse=True)\n
    command_list = []\n
    for item in command_file_list:\n
        if 'next' in item or 'extension_add' in item or 'unknown_command' in item:\n
            continue\n
        command_info = _parse_command_file(os.path.join(commands_history_dir, item))\n
        if command_info:\n
            command_list.insert(0, command_info)\n
        if len(command_list) == num:\n
            return command_list\n\n
    return command_list\n\n
\ndef _parse_command_file(command_file_path):\n
    if not os.path.exists(command_file_path):\n
        return \"\"\n\n
    with open(command_file_path, \"r\") as f:\n
        first_line = f.readline()\n
        if not first_line:\n
            return \"\"\n
        line_items = first_line.split('command args: ')\n
        if len(line_items) != 2:\n
            return \"\"\n
        command_str = line_items[1]\n
        if not command_str:\n
            return \"\"\n\n
    items = command_str.split()\n
    commend_items = []\n
    argument_items = []\n
    for item in items:\n
        if item.startswith('-'):\n
            argument_items.append(item)\n
        elif not item.startswith('{'):\n
            commend_items.append(item)\n\n
    command_info = {'command': ' '.join(commend_items)}\n
    if argument_items:\n
        command_info['arguments'] = argument_items\n\n
    return json.dumps(command_info)\n\n
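\n# The exception history log stores one entry per line; the most recent entry is the last line\n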
def get_last_exception(cmd):\n
    '''Get the last recorded exception from local log files'''\n
    history_file_name = os.path.join(cmd.cli_ctx.config.config_dir, 'recommendation', 'exception_history.log')\n
    if not os.path.exists(history_file_name):\n
        return ''\n
    with open(history_file_name, \"r\") as f:\n
        lines = f.read().splitlines()\n
    return lines[-1] if lines else ''\n\n
\ndef get_title_case(str):\n
    if not str:\n
        return str\n
    str = str.strip()\n
    return str[0].upper() + str[1:]\n\n
\ndef _is_modern_terminal():\n
    # Windows Terminal: https://github.com/microsoft/terminal/issues/1040\n
    if 'WT_SESSION' in os.environ:\n
        return True\n
    # VS Code: https://github.com/microsoft/vscode/pull/30346\n
    if os.environ.get('TERM_PROGRAM', '').lower() == 'vscode':\n
        return True\n
    return False\n\n
\ndef is_modern_terminal():\n
    \"\"\"Detect whether the current terminal is a modern terminal that supports Unicode and\n
    Console Virtual Terminal Sequences.\n
    Currently, these terminals can be detected:\n
    - Windows Terminal\n
    - VS Code terminal\n
    \"\"\"\n
    # This function wraps _is_modern_terminal and uses a function-level cache to save the result.\n
    if not hasattr(is_modern_terminal, \"return_value\"):\n
        setattr(is_modern_terminal, \"return_value\", _is_modern_terminal())\n
    return getattr(is_modern_terminal, \"return_value\")\n\n
\ndef print_successful_styled_text(message):\n\n
    from azure.cli.core.style import print_styled_text, Style\n
    prefix_text = '\\nDone: '\n
    if is_modern_terminal():\n
        prefix_text = '\\n(✓ )Done: '\n
    print_styled_text([(Style.SUCCESS, prefix_text), (Style.PRIMARY, message)])\n
","sub_path":"src/next/azext_next/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"236714136","text":"#Problem 886: Possible Bipartition\n\n# Given a set of N people (numbered 1, 2, ..., N), we would like to split everyone into two groups of any size.\n\n# Each person may dislike some other people, and they should not go into the same group.\n\n# Formally, if dislikes[i] = [a, b], it means it is not allowed to put the people numbered a and b into the same group.\n\n# Return true if and only if it is possible to split everyone into two groups in this way.\n\n\n# Example 1:\n\n# Input: N = 4, dislikes = [[1, 2], [1, 3], [2, 4]]\n# Output: true\n# Explanation: group1 [1, 4], group2 [2, 3]\n# Example 2:\n\n# Input: N = 3, dislikes = [[1, 2], [1, 3], [2, 3]]\n# Output: false\n# Example 3:\n\n# Input: N = 5, dislikes = [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5]]\n# Output: false\n\nfrom queue import Queue\n
from typing import List\n\n
\nclass Solution:\n
    def possibleBipartition(self, N: int, dislikes: List[List[int]]) -> bool:\n
        visited = [0] * N\n
        graph = [[] for _ in range(N)]\n
        for edge in dislikes:\n
            u = edge[0] - 1\n
            v = edge[1] - 1\n
            graph[u].append(v)\n
            graph[v].append(u)\n\n
        q = Queue()\n
        for i in range(0, N):\n
            if visited[i] != 0:\n
                continue\n
            visited[i] = 1\n
            q.put(i)\n
            while not q.empty():\n
                s = q.qsize()\n
                for j in range(0, s):\n
                    u = q.get()\n
                    for k in range(0, len(graph[u])):\n
                        v = graph[u][k]\n
                        if visited[v] == 0:\n
                            visited[v] = 2 if visited[u] == 1 else 1\n
                            q.put(v)\n\n
                        if visited[v] == visited[u]:\n
                            return False\n\n
        return True\n
","sub_path":"solutions_python/possible_bipartition.py","file_name":"possible_bipartition.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"515373120","text":"import os.path\n\nfrom itertools import chain\n\nfrom . 
import builtin\nfrom .packages import system_executable\nfrom ..build_inputs import Directory, Edge, File, Phony, objectify, sourcify\nfrom ..file_types import *\nfrom ..iterutils import iterate, listify\nfrom ..path import Path, Root\nfrom ..shell import posix as pshell\nfrom .. import version as _version\n\nclass TestCase(object):\n def __init__(self, target, options, env):\n self.target = target\n self.options = options\n self.env = env\n\nclass TestDriver(object):\n def __init__(self, target, options, env):\n self.target = target\n self.options = options\n self.env = env\n self.tests = []\n\nclass ObjectFiles(list):\n def __getitem__(self, key):\n if isinstance(key, basestring):\n key = Path(key, Root.srcdir)\n elif isinstance(key, File):\n key = key.path\n\n if isinstance(key, Path):\n for i in self:\n if i.creator and i.creator.file.path == key:\n return i\n raise ValueError(\"{!r} not found\".format(key))\n else:\n return list.__getitem__(self, key)\n\n#####\n\nclass Compile(Edge):\n def __init__(self, build, env, name, file, include=None,\n system_include=None, packages=None, options=None, lang=None,\n extra_deps=None):\n if name is None:\n name = os.path.splitext(file)[0]\n include = [sourcify(i, HeaderDirectory) for i in iterate(include)]\n system_include = [sourcify(i, HeaderDirectory) for i in iterate(system_include)]\n\n self.file = sourcify(file, SourceFile, lang=lang)\n self.builder = env.compiler(self.file.lang)\n self.include = sum((i.includes for i in iterate(packages)), include)\n self.system_include = sum((i.system_includes for i in iterate(packages)), system_include)\n self.options = pshell.listify(options)\n self.internal_options = []\n\n target = self.builder.output_file(name, self.file.lang)\n Edge.__init__(self, build, target, extra_deps)\n\nclass Link(Edge):\n __prefixes = {\n 'executable': '',\n 'static_library': 'lib',\n 'shared_library': 'lib',\n }\n\n @classmethod\n def __name(cls, name, mode):\n head, tail = os.path.split(name)\n return os.path.join(head, cls.__prefixes[mode] + tail)\n\n def __init__(self, builtins, build, env, mode, name, files, include=None,\n system_include=None, libs=None, packages=None,\n compile_options=None, link_options=None, lang=None,\n extra_deps=None):\n # XXX: Try to detect if a string refers to a shared lib?\n libs = [sourcify(i, Library, StaticLibrary) for i in iterate(libs)]\n if libs:\n extra_deps = list(chain(iterate(extra_deps), libs))\n self.files = builtins['object_files'](\n files, include, system_include, packages, compile_options, lang\n )\n if len(self.files) + sum(1 for i in libs if isinstance(i, WholeArchive)) == 0:\n raise ValueError('need at least one source file or whole archive')\n\n def langs():\n if lang:\n yield lang\n for i in self.files:\n yield i.lang\n\n self.builder = env.linker(langs(), mode)\n self.libs = sum((i.libraries for i in iterate(packages)), libs)\n\n lib_dirs = (self.builder.lib_dirs(i.lib_dirs)\n for i in iterate(packages))\n self.options = sum(lib_dirs, pshell.listify(link_options))\n self.name = self.__name(name, mode)\n\n target = self.builder.output_file(name)\n for c in (i.creator for i in self.files if i.creator):\n c.internal_options.extend(c.builder.link_args(self.name, mode))\n if getattr(self.builder, 'post_install', None):\n target.post_install = self.builder.post_install\n\n build.fallback_default = target\n Edge.__init__(self, build, target, extra_deps)\n\nclass Alias(Edge):\n def __init__(self, build, name, deps=None):\n Edge.__init__(self, build, Phony(name), deps)\n\nclass 
Command(Edge):\n def __init__(self, build, name, cmd=None, cmds=None, environment=None,\n extra_deps=None):\n if (cmd is None) == (cmds is None):\n raise ValueError('exactly one of \"cmd\" or \"cmds\" must be specified')\n elif cmds is None:\n cmds = [cmd]\n\n self.cmds = cmds\n self.env = environment or {}\n Edge.__init__(self, build, Phony(name), extra_deps)\n\n#####\n\n@builtin\ndef source_file(name, lang=None):\n # XXX: Add a way to make a generic File object instead of a SourceFile?\n return SourceFile(name, root=Root.srcdir, lang=lang)\n\n@builtin\ndef header(name):\n return HeaderFile(name, root=Root.srcdir)\n\n@builtin\ndef header_directory(directory):\n return HeaderDirectory(directory, root=Root.srcdir)\n\n@builtin\ndef whole_archive(lib):\n return WholeArchive(lib)\n\n@builtin.globals('build_inputs', 'env')\ndef object_file(build, env, name=None, file=None, *args, **kwargs):\n if file is None:\n if name is None:\n raise TypeError('expected name')\n return ObjectFile(name, root=Root.srcdir, *args, **kwargs)\n else:\n return Compile(build, env, name, file, *args, **kwargs).target\n\n@builtin.globals('build_inputs', 'env')\ndef object_files(build, env, files, *args, **kwargs):\n def _compile(file, *args, **kwargs):\n return Compile(build, env, None, file, *args, **kwargs).target\n return ObjectFiles(objectify(i, ObjectFile, _compile, *args, **kwargs)\n for i in iterate(files))\n\n@builtin.globals('builtins', 'build_inputs', 'env')\ndef executable(builtins, build, env, name, files=None, *args, **kwargs):\n if files is None:\n return Executable(name, root=Root.srcdir, *args, **kwargs)\n else:\n return Link(builtins, build, env, 'executable', name, files, *args,\n **kwargs).target\n\n@builtin.globals('builtins', 'build_inputs', 'env')\ndef static_library(builtins, build, env, name, files=None, *args, **kwargs):\n if files is None:\n return StaticLibrary(name, root=Root.srcdir, *args, **kwargs)\n else:\n return Link(builtins, build, env, 'static_library', name, files, *args,\n **kwargs).target\n\n@builtin.globals('builtins', 'build_inputs', 'env')\ndef shared_library(builtins, build, env, name, files=None, *args, **kwargs):\n if files is None:\n # XXX: What to do here for Windows, which has a separate DLL file?\n return SharedLibrary(name, root=Root.srcdir, *args, **kwargs)\n else:\n return Link(builtins, build, env, 'shared_library', name, files, *args,\n **kwargs).target\n\n@builtin.globals('build_inputs')\ndef alias(build, *args, **kwargs):\n return Alias(build, *args, **kwargs).target\n\n@builtin.globals('build_inputs')\ndef command(build, *args, **kwargs):\n return Command(build, *args, **kwargs).target\n\n#####\n\n@builtin.globals('build_inputs')\ndef default(build, *args):\n if len(args) == 0:\n raise ValueError('expected at least one argument')\n build.default_targets.extend(i for i in args if i.creator)\n\n@builtin.globals('builtins', 'build_inputs')\ndef install(builtins, build, *args, **kwargs):\n def _flatten(args):\n for i in args:\n for j in i.all:\n yield j\n\n if len(args) == 0:\n raise ValueError('expected at least one argument')\n all_files = kwargs.pop('all', True)\n\n for i in _flatten(args) if all_files else args:\n if isinstance(i, Directory):\n build.install_targets.directories.append(i)\n else:\n builtins['default'](i)\n build.install_targets.files.append(i)\n\n@builtin.globals('build_inputs')\ndef test(build, test, options=None, environment=None, driver=None):\n if driver and environment:\n raise TypeError('only one of \"driver\" and \"environment\" may be ' 
+\n 'specified')\n\n test = sourcify(test, File)\n build.tests.targets.append(test)\n case = TestCase(test, pshell.listify(options), environment or {})\n (driver or build.tests).tests.append(case)\n return case\n\n@builtin.globals('builtins', 'build_inputs', 'env')\ndef test_driver(builtins, build, env, driver, options=None, environment=None,\n parent=None):\n if parent and environment:\n raise TypeError('only one of \"parent\" and \"environment\" may be ' +\n 'specified')\n\n driver = objectify(driver, Executable, builtins['system_executable'])\n result = TestDriver(driver, pshell.listify(options), environment or {})\n (parent or build.tests).tests.append(result)\n return result\n\n@builtin.globals('build_inputs')\ndef test_deps(build, *args):\n if len(args) == 0:\n raise ValueError('expected at least one argument')\n build.tests.extra_deps.extend(i for i in args if i.creator)\n\n@builtin.globals('build_inputs')\ndef global_options(build, options, lang):\n if not lang in build.global_options:\n build.global_options[lang] = []\n build.global_options[lang].extend(pshell.listify(options))\n\n@builtin.globals('build_inputs')\ndef global_link_options(build, options):\n build.global_link_options.extend(pshell.listify(options))\n","sub_path":"bfg9000/builtins/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":9323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"438180353","text":"\"\"\"empty message\n\nRevision ID: a65fa30520d5\nRevises: 9f543ca62d40\nCreate Date: 2020-03-06 03:24:08.763789\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a65fa30520d5'\ndown_revision = '9f543ca62d40'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('meeting',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('date', sa.DateTime(), nullable=True),\n sa.Column('time_start', sa.Integer(), nullable=True),\n sa.Column('time_end', sa.Integer(), nullable=True),\n sa.Column('client_id', sa.Integer(), nullable=True),\n sa.Column('client_comment', sa.String(length=255), nullable=True),\n sa.Column('manager_comment', sa.String(length=255), nullable=True),\n sa.ForeignKeyConstraint(['client_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_meeting_time_end'), 'meeting', ['time_end'], unique=False)\n op.create_index(op.f('ix_meeting_time_start'), 'meeting', ['time_start'], unique=False)\n op.drop_index('ix_post_timestamp', table_name='post')\n op.drop_table('post')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n
    op.create_table('post',\n
    sa.Column('id', sa.INTEGER(), nullable=False),\n
    sa.Column('body', sa.VARCHAR(length=140), nullable=True),\n
    sa.Column('timestamp', sa.DATETIME(), nullable=True),\n
    sa.Column('user_id', sa.INTEGER(), nullable=True),\n
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n
    sa.PrimaryKeyConstraint('id')\n
    )\n
    op.create_index('ix_post_timestamp', 'post', ['timestamp'], unique=False)\n
    op.drop_index(op.f('ix_meeting_time_start'), table_name='meeting')\n
    op.drop_index(op.f('ix_meeting_time_end'), table_name='meeting')\n
    op.drop_table('meeting')\n
    # ### end Alembic commands ###\n
","sub_path":"migrations/versions/a65fa30520d5_.py","file_name":"a65fa30520d5_.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"617418104","text":"from sklearn.tree import DecisionTreeClassifier\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nimport datetime\n\ndigits_data = datasets.load_digits()\n\n
\nclass DTree(object):\n
    def __init__(self, X_train, X_test, y_train, y_test, start_state=0):\n
        self.X_train = X_train\n
        self.X_test = X_test\n
        self.y_train = y_train\n
        self.y_test = y_test\n
        self.start_state = start_state\n
        self.accuracy = 0.0\n\n
    def fit(self):\n
        self.dt = DecisionTreeClassifier()  # initialize the model\n
        self.dt.fit(self.X_train, self.y_train)\n\n
    def predict(self):\n
        y_pred = self.dt.predict(self.X_test)\n
        miss_classified = (y_pred != self.y_test).sum()\n
        self.accuracy = accuracy_score(y_pred, self.y_test)\n\n
    def main(self):\n
        start = datetime.datetime.now()\n
        self.fit()\n\n
        end = datetime.datetime.now()\n
        self.predict()\n
        #print((end - start).total_seconds())\n
        return [self.accuracy, (end - start).total_seconds()]\n\n
","sub_path":"Proj3/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"121099998","text":"contadorprodutos = maiorpreço = totalpreço = preçomenor = maiorquemil = 0\n
nome_produto_maiorpreço = ''\n
nome_produto_menorpreço = ''\n
while True:\n
    produto = str(input('What is the name of the product? ')).capitalize()\n
    preço = int(input('What is the price of the product: R$'))\n
    contadorprodutos += 1\n
    totalpreço += preço\n
    if contadorprodutos == 1:\n
        maiorpreço = preçomenor = preço\n
        nome_produto_maiorpreço = produto\n
        nome_produto_menorpreço = produto\n
    if preço > maiorpreço:\n
        maiorpreço = preço\n
        nome_produto_maiorpreço = produto\n
    if preço < preçomenor:\n
        preçomenor = preço\n
        nome_produto_menorpreço = produto\n
    if preço > 1000:\n
        maiorquemil += 1\n
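    #ask whether to continue; accept only the first letter Y or N, in any case\n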
    stop = ' '\n
    while stop not in 'YN':\n
        stop = str(input('Do you want to continue? [Y/N]')).upper().strip()[0]\n
    if stop == 'N':\n
        break\n
print(f'''The product with the highest price was {nome_produto_maiorpreço} and its price was R${maiorpreço:.2f}\nThe product with the lowest price was {nome_produto_menorpreço} and its price was R${preçomenor:.2f}\n{contadorprodutos} products were added and the purchase total was R${totalpreço:.2f}\nThe number of products priced above R$1000.00 is {maiorquemil}''')","sub_path":"Exercícios curso em video/Exercicios/ex070.py","file_name":"ex070.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"277532995","text":"import numpy as np\nimport vtk\nfrom vtk.util.numpy_support import vtk_to_numpy\n\n
\ndef read_vector(vtk_file, component_names):\n
    r\"\"\"\n
    Read a three-component vector field from a vtk rectilinear grid file.\n
    \"\"\"\n
    data = get_data(vtk_file)\n
    mesh, shape = get_mesh(data)\n
    vector = get_vector(data, shape, component_names[0],\n
                        component_names[1], component_names[2])\n
    return mesh, vector\n\n
def read_scalar(vtk_file, component_name):\n
    r\"\"\"\n
    Read a scalar field from a vtk rectilinear grid file.\n
    \"\"\"\n
    data = get_data(vtk_file)\n
    mesh, shape = get_mesh(data)\n
    scalar = get_scalar(data, shape, component_name)\n
    return mesh, scalar\n\n
def get_data(vtk_file):\n
    r\"\"\"\n
    Get data from a vtk rectilinear grid file.\n\n
    Parameters\n
    ----------\n
    vtk_file: string\n
        file name\n\n
    Returns\n
    -------\n
    data: RectilinearGrid object\n
        contains all data and grid points.\n
    \"\"\"\n
    reader = vtk.vtkRectilinearGridReader()\n
    reader.SetFileName(vtk_file)\n
    reader.ReadAllVectorsOn()\n
    reader.ReadAllScalarsOn()\n
    reader.Update()\n
    data = reader.GetOutput()\n
    return data\n\n
\ndef get_mesh(data):\n
    r\"\"\"\n
    Reconstruct the vtk rectilinear mesh with numpy.\n\n
    Parameters\n
    ----------\n
    data: RectilinearGrid object\n
        contains all data and grid points.\n\n
    Returns\n
    -------\n
    mesh: array of ndarray\n
        mesh of the rectilinear grid\n
    shape: tuple\n
        mesh shape\n
    \"\"\"\n
    x_coord = vtk_to_numpy(data.GetXCoordinates())\n
    y_coord = vtk_to_numpy(data.GetYCoordinates())\n
    z_coord = vtk_to_numpy(data.GetZCoordinates())\n
    mesh = np.meshgrid(x_coord, y_coord, z_coord)\n
    shape = mesh[0].shape\n
    return mesh, shape\n\n
\ndef get_scalar(data, shape, scalar_name):\n
    r\"\"\"\n
    Return a numpy array of a scalar in a vtk RectilinearGrid object.\n\n
    Parameters\n
    ----------\n
    data: RectilinearGrid object\n
        contains all data and grid points.\n
    shape: tuple\n
        mesh shape\n
    scalar_name: string\n
        name of scalar\n
    Returns\n
    -------\n
    scalar: ndarray\n
        scalar data resized to shape\n
    \"\"\"\n
    shape = (shape[2], shape[0], shape[1])\n
    data = data.GetPointData()\n
    scalar = vtk_to_numpy(data.GetArray(scalar_name)).astype('float64')\n
    scalar.resize(shape)\n
    scalar = np.swapaxes(np.swapaxes(scalar, 1, 0), 2, 1)\n
    return scalar\n\n
\ndef get_vector(data, shape, x_name,\n
               y_name, z_name):\n
    r\"\"\"\n
    Return a numpy array of a vector in a vtk RectilinearGrid object.\n\n
    Parameters\n
    ----------\n
    data: RectilinearGrid object\n
        contains all data and grid points.\n
    shape: tuple\n
        mesh shape\n
    x_name: string\n
        name of x component of vector\n
    y_name: string\n
        name of y component of vector\n
    z_name: string\n
        name of z component of vector\n
    Returns\n
    -------\n
    vector: tuple of ndarray\n
        vector data resized to shape\n
    \"\"\"\n
    vector_x = get_scalar(data, shape, x_name)\n
    vector_y = get_scalar(data, shape, y_name)\n
    vector_z = get_scalar(data, shape, z_name)\n
    return [vector_x, vector_y, 
vector_z]\n","sub_path":"vector_comparison/read_rectilinear_vtk.py","file_name":"read_rectilinear_vtk.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"497184649","text":"#! /usr/bin/env python3\nimport matplotlib\nmatplotlib.use('Agg')\nfrom scipy.interpolate import interp1d\nfrom pyathena.utils import *\nfrom pydrum.plots1d import *\nfrom pylab import *\nfrom plot_mcmc import *\nfrom astropy.io import fits\nfrom multiprocessing import Pool\n\nlats, tps, h2os, nh3s, pres, rhos = [], [], [], [], [], []\n\nfiles = glob.glob('mwr*.fits')\ncases = [fname[:-5] for fname in files]\nwith Pool(3) as p:\n results = p.map(plot_profiles, cases)\n\nfor i, v in enumerate(results):\n lats.append(parse_signed(cases[i]))\n tps.append(average(v[0], axis = 0, weights = v[5]))\n h2os.append(average(v[1], axis = 0, weights = v[5]))\n nh3s.append(average(v[2], axis = 0, weights = v[5]))\n pres.append(v[3])\n rhos.append(v[4])\n\nprint('making 2d maps ...')\nlats = array(lats)\ntps = array(tps)\nh2os = array(h2os)\nnh3s = array(nh3s)/2.7*350. # g/kg -> ppm\npres = array(pres)\nrhos = array(rhos)\n\nix = argsort(lats)\nlats = lats[ix]\ntps = tps[ix,:]\nnh3s = nh3s[ix,:]\npres = pres[ix,:]\nrhos = rhos[ix,:]\n\noutput = re.sub('_[mp]\\d+\\.\\d*', '', cases[0])\noutput = re.sub('mwr', 'profile', output)\n\n# interpolation to constant pressure\npavg = mean(pres, axis = 0)\n\nfor i in range(len(lats)):\n Tfunc = interp1d(log(pres[i,::-1]), tps[i,::-1], \n bounds_error = False, fill_value = 'extrapolate')\n tps[i,:] = Tfunc(log(pavg))\n NH3func = interp1d(log(pres[i,::-1]), nh3s[i,::-1], \n bounds_error = False, fill_value = 'extrapolate')\n nh3s[i,:] = NH3func(log(pavg))\n\n# ammonia map\nX, Y = meshgrid(lats, pavg)\nfigure(1, figsize = (10, 8))\nax = axes()\nh = ax.contourf(X, Y, nh3s.T, linspace(0, 400, 21), cmap = 'inferno')\nc = colorbar(h)\nc.ax.invert_yaxis()\nax.set_yscale('log')\nax.set_ylim([100., 0.3])\nax.set_xlabel('PC latitude', fontsize = 15)\nax.set_ylabel('Pressure (bar)', fontsize = 15)\nsavefig('%s_ammonia2d.png' % output, bbox_inches = 'tight')\nsavefig('%s_ammonia2d.pdf' % output, bbox_inches = 'tight')\nclose()\n\n# temperature anomaly map\nfigure(1, figsize = (10, 8))\nax = axes()\n#h1 = ax.contour(X, Y, tps.T, [-5, -3, -1], colors = 'b')\n#clabel(h1, fontsize = 12, inline = 1, fmt = '%.1f')\n#h2 = ax.contour(X, Y, tps.T, linspace(1, 21, 11), colors = 'r')\n#clabel(h2, fontsize = 12, inline = 1, fmt = '%.1f')\nh = ax.contourf(X, Y, tps.T, linspace(-5, 11, 17), cmap = 'OrRd')\n#ax.contour(X, Y, tps.T, [0.], colors = '0.7', linewidths = 4)\nc = colorbar(h)\nax.set_yscale('log')\nax.set_ylim([100., 0.3])\nax.set_xlabel('PC latitude', fontsize = 15)\nax.set_ylabel('Pressure (bar)', fontsize = 15)\nsavefig('%s_temp2d.png' % output, bbox_inches = 'tight')\nsavefig('%s_temp2d.pdf' % output, bbox_inches = 'tight')\nclose()\n\n# save results to fits file\n#result = read_baseline(cases[0])\nwith Pool(8) as p:\n results = p.map(read_baseline, array(cases)[ix])\n\nT1, P1, Z1 = [], [], []\nfor v in results:\n T1.append(v[0])\n P1.append(v[1])\n Z1.append(v[2])\nT1 = array(T1)\nP1 = array(P1)\nZ1 = array(Z1)\n\nh1 = fits.PrimaryHDU(nh3s/350.*2.7) # ppm -> g/kg\nh1.header['CREATOR'] = ('Cheng Li', 'file creator')\nh1.header['VAR'] = ('qNH3', 'ammonia mass mixing ratio (g/kg)')\nh1.header['LATS'] = (lats.min(), 'PC latitude south')\nh1.header['LATN'] = (lats.max(), 'PC latitude north')\nh1.header['PTOP'] = 
(pres.min(), 'top pressure (bar)')\n
h1.header['PBOT'] = (pres.max(), 'bottom pressure (bar)')\n\n
h2 = fits.ImageHDU(h2os)\n
h2.header['VAR'] = ('qH2O', 'water mass mixing ratio (g/kg)')\n\n
h3 = fits.ImageHDU(tps)\n
h3.header['VAR'] = ('Tp', 'temperature anomaly (K)')\n\n
h4 = fits.ImageHDU(lats)\n
h4.header['VAR'] = ('lat', 'PC latitude coordinates')\n\n
h5 = fits.ImageHDU(pres)\n
h5.header['VAR'] = ('pres', 'pressure (bar)')\n\n
h6 = fits.ImageHDU(rhos)\n
h6.header['VAR'] = ('rho', 'density (kg/m^3)')\n\n
h7 = fits.ImageHDU(T1)\n
h7.header['VAR'] = ('T1', 'reference temperature (K)')\n\n
h8 = fits.ImageHDU(P1)\n
h8.header['VAR'] = ('P1', 'reference pressure (bar)')\n\n
h9 = fits.ImageHDU(Z1)\n
h9.header['VAR'] = ('Z1', 'reference height (km)')\n\n
hdul = fits.HDUList([h1, h2, h3, h4, h5, h6, h7, h8, h9])\n
hdul.writeto('%s.fits' % output, overwrite = True)\n
","sub_path":"plot_map2d.py","file_name":"plot_map2d.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"585780112","text":"# %load q03_rf_rfe/build.py\n# Default imports\nimport pandas as pd\n\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\nfrom sklearn.feature_selection import RFE\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# Your solution code here\n\n
def rf_rfe(df):\n
    rf = RandomForestClassifier()\n
    X = data.iloc[:,:-1]\n
    y = data.iloc[:,-1]\n
    features = X.shape[1]//2  # integer count of features to keep\n
    rfe_rf = RFE(rf, n_features_to_select=features)\n
    rfe_rf.fit(X,y)\n
    return list(X.loc[:,rfe_rf.support_].columns.values)\n
","sub_path":"q03_rf_rfe/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"438074838","text":"import argparse\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\nfrom object_detection.utils import visualization_utils as vis_util\nfrom object_detection.utils import label_map_util\n\n#if tf.__version__ < '1.4.0':\n#    raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')\n\nNUM_CLASSES = 764\nimagepath=\"/home/ice2019/Ice2019_Detect/Data/pj_vehicle_train_00002-of-00004\"\nxmlpath=\"/home/ice2019/Ice2019_Detect/Data/pj_vehicle_train_00002-of-00004_autolabel\"\n\nPATH_TO_CKPT = \"/home/ice2019/Ice2019_Detect/Outputs/\"+'exported_graphs/frozen_inference_graph.pb'\nPATH_TO_LABELS = \"/home/ice2019/Ice2019_Detect/Data/\"+ 'labels_car_tem.txt'\n\n\n# get the list of filenames under a directory\n
def file_name(file_dir):\n
    list=[]\n
    for root, dirs, files in os.walk(file_dir):\n
        list.append(files)\n
    return list\n\n
imageFilenames=file_name(imagepath)\n\n\n\n\n
def CreateLableXmlAuto(imageFilenames):\n
    detection_graph = tf.Graph()\n
    with detection_graph.as_default():\n
        od_graph_def = tf.GraphDef()\n
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n
            serialized_graph = fid.read()\n
            od_graph_def.ParseFromString(serialized_graph)\n
            tf.import_graph_def(od_graph_def, name='')\n\n
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n
    category_index = label_map_util.create_category_index(categories)\n\n
    def load_image_into_numpy_array(image):\n
        (im_width, im_height) = image.size\n
        return np.array(image.getdata()).reshape(\n
            (im_height, im_width, 3)).astype(np.uint8)\n\n
    with detection_graph.as_default():\n
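        #one session serves all images: run the detector on each image and keep\n
        #the highest-scoring box as the auto-generated label\n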
        with tf.Session(graph=detection_graph) as sess:\n
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n
            total_count = 0.0\n
            correct_count = 0.0\n
            for filename in imageFilenames:\n
                total_count += 1.0\n
                test_img_path = imagepath + \"/\" + filename\n
                image = Image.open(test_img_path)\n
                image_np = load_image_into_numpy_array(image)\n
                image_np_expanded = np.expand_dims(image_np, axis=0)\n
                print(\"session begins to run\")\n
                (boxes, scores, classes, num) = sess.run(\n
                    [detection_boxes, detection_scores, detection_classes, num_detections],\n
                    feed_dict={image_tensor: image_np_expanded})\n
                print(\"session ended\")\n
                (im_width, im_height) = image.size\n
                ymin=int(boxes[0][0][0]*im_height)\n
                print(int(boxes[0][0][0]*im_height))\n
                xmin=int(boxes[0][0][1]*im_width)\n
                print(int(boxes[0][0][1]*im_width))\n
                ymax=int(boxes[0][0][2]*im_height)\n
                print(int(boxes[0][0][2]*im_height))\n
                xmax=int(boxes[0][0][3]*im_width)\n
                print(int(boxes[0][0][3]*im_width))\n
                labelname = str(int(filename.split(\".\")[0].split(\"_\")[1])+1)\n
                output_label = classes[0][0]\n
                if (int(output_label)== int(labelname)):\n
                    correct_count += 1.0\n
                createLabelXML(filename, ymin, xmin, ymax, xmax, labelname)\n\n
            print(\"annotation correction ratio: {}\".format(correct_count/total_count))\n\n
            #return ymin,xmin,ymax,xmax\n
            #return int(boxes[0][0][0]*im_height),int(boxes[0][0][1]*im_width),int(boxes[0][0][2]*im_height),int(boxes[0][0][3]*im_width)\n\n\n\n
def createLabelXML(filename,ymin, xmin, ymax, xmax,labelname):\n
    imagePath=imagepath+'/'+filename\n
    fh = open(xmlpath+'/'+filename.split(\".\")[0]+\".xml\", 'w')\n
    fh.write(\"<annotation>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t<folder>images</folder>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t<filename>\"+filename+\"</filename>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t<path>\"+imagePath+\"</path>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t<source>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t<database>Unknown</database>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t</source>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t<size>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t<width>432</width>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t<height>320</height>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t<depth>3</depth>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t</size>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t<segmented>0</segmented>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t<object>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t<name>\"+labelname+\"</name>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t<pose>Unspecified</pose>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t<truncated>0</truncated>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t<difficult>0</difficult>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t<bndbox>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t\t<xmin>\"+str(xmin)+\"</xmin>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t\t<ymin>\"+str(ymin)+\"</ymin>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t\t<xmax>\"+str(xmax)+\"</xmax>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t\t<ymax>\"+str(ymax)+\"</ymax>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t\t</bndbox>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"\t</object>\")\n
    fh.write(\"\\n\")\n
    fh.write(\"</annotation>\")\n
    fh.write(\"\\n\")\n
    fh.close()\n\n
\nif __name__ == '__main__':\n
    CreateLableXmlAuto(imageFilenames[0])\n
","sub_path":"research/remarkLableAndLocation.py","file_name":"remarkLableAndLocation.py","file_ext":"py","file_size_in_byte":5660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"618123525","text":"#################################################################################\n# WaterTAP Copyright (c) 2020-2023, The Regents of the University of 
California,\n# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,\n# National Renewable Energy Laboratory, and National Energy Technology\n# Laboratory (subject to receipt of any required approvals from the U.S. Dept.\n# of Energy). All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license\n# information, respectively. These files are also available online at the URL\n# \"https://github.com/watertap-org/watertap/\"\n#################################################################################\n\"\"\"\nThis module contains a zero-order representation of a gas-sparged membrane unit.\n\"\"\"\nfrom types import MethodType\nfrom idaes.core import declare_process_block_class\n\nfrom watertap.core import pump_electricity, ZeroOrderBaseData\nimport idaes.logger as idaeslog\nimport idaes.core.util.scaling as iscale\nfrom pyomo.environ import NonNegativeReals, Var, units as pyunits, Reference\nfrom watertap.core.zero_order_sido import initialize_sido\n\n# Some more information about this module\n__author__ = \"Adam Atia\"\n\n# Set up logger\n_log = idaeslog.getLogger(__name__)\n\n\n@declare_process_block_class(\"GasSpargedMembraneZO\")\nclass GasSpargedMembraneZOData(ZeroOrderBaseData):\n \"\"\"\n Zero-Order model for a gas-sparged membrane.\n This unit is similar to a SIDO, but there is technically a third outlet for gas extraction.\n Three StateBlocks are added with corresponding Ports:\n\n * properties_inlet\n * properties_treated\n * properties_byproduct\n\n Two additional variables are added:\n\n * recovery_vol (indexed by time)\n * removal_frac_mass_comp (indexed by time and component)\n\n Four additional constraints are added to represent the material balances, with modifications\n to account for gas extraction.\n\n * water_recovery_equation (indexed by time)\n * flow_balance (indexed by time)\n * solute_removal_equation (indexed by time and solute)\n * solute_treated_equation (indexed by time and solute)\n\n The build method also sets private attributes on the unit model with references\n to the appropriate initialization and scaling methods to use and to return\n the inlet volumetric flow rate.\n \"\"\"\n\n CONFIG = ZeroOrderBaseData.CONFIG()\n\n def build(self):\n super().build()\n\n self._tech_type = \"gas_sparged_membrane\"\n\n self._has_recovery_removal = True\n self._initialize = MethodType(initialize_sido, self)\n self._scaling = MethodType(calculate_scaling_factors_gas_extraction, self)\n\n # Create state blocks for inlet and outlets\n tmp_dict = dict(**self.config.property_package_args)\n tmp_dict[\"has_phase_equilibrium\"] = False\n tmp_dict[\"defined_state\"] = True\n\n self.properties_in = self.config.property_package.build_state_block(\n self.flowsheet().time, doc=\"Material properties at inlet\", **tmp_dict\n )\n\n tmp_dict_2 = dict(**tmp_dict)\n tmp_dict_2[\"defined_state\"] = False\n\n self.properties_treated = self.config.property_package.build_state_block(\n self.flowsheet().time,\n doc=\"Material properties of treated water\",\n **tmp_dict_2\n )\n self.properties_byproduct = self.config.property_package.build_state_block(\n self.flowsheet().time,\n doc=\"Material properties of byproduct stream\",\n **tmp_dict_2\n )\n\n # Create Ports\n self.add_port(\"inlet\", self.properties_in, doc=\"Inlet port\")\n self.add_port(\n \"treated\", self.properties_treated, doc=\"Treated water outlet port\"\n )\n self.add_port(\n \"byproduct\", self.properties_byproduct, doc=\"Byproduct outlet port\"\n )\n\n # 
Add performance variables\n self.recovery_frac_mass_H2O = Var(\n self.flowsheet().time,\n initialize=0.8,\n domain=NonNegativeReals,\n units=pyunits.dimensionless,\n bounds=(0.0, 1.0000001),\n doc=\"Mass recovery fraction of water in the treated stream\",\n )\n self.removal_frac_mass_comp = Var(\n self.flowsheet().time,\n self.config.property_package.solute_set,\n domain=NonNegativeReals,\n initialize=0.01,\n units=pyunits.dimensionless,\n doc=\"Solute removal fraction on a mass basis\",\n )\n self.gas_mass_influent_ratio = Var(\n self.flowsheet().time,\n domain=NonNegativeReals,\n units=pyunits.dimensionless,\n doc=\"Mass flow of gas extracted per mass flow of influent\",\n )\n self.flow_mass_gas_extraction = Var(\n self.flowsheet().time,\n domain=NonNegativeReals,\n units=pyunits.kg / pyunits.s,\n doc=\"Mass flow of hydrogen extracted\",\n )\n self._fixed_perf_vars.append(self.gas_mass_influent_ratio)\n self._perf_var_dict[\n \"Mass of gas extracted per mass flow of influent(kg/d/(kg/d)\"\n ] = self.gas_mass_influent_ratio\n self._perf_var_dict[\n \"Mass flow of gas extracted (kg/s))\"\n ] = self.flow_mass_gas_extraction\n\n # Add performance constraints\n # Water recovery\n @self.Constraint(self.flowsheet().time, doc=\"Water recovery equation\")\n def water_recovery_equation(b, t):\n return (\n b.recovery_frac_mass_H2O[t] * b.properties_in[t].flow_mass_comp[\"H2O\"]\n == b.properties_treated[t].flow_mass_comp[\"H2O\"]\n )\n\n # Flow balance\n @self.Constraint(self.flowsheet().time, doc=\"Overall flow balance\")\n def mass_balance(b, t):\n return (\n sum(\n b.properties_in[t].flow_mass_comp[j]\n for j in self.config.property_package.component_list\n )\n == sum(\n b.properties_treated[t].flow_mass_comp[j]\n for j in self.config.property_package.component_list\n )\n + sum(\n b.properties_byproduct[t].flow_mass_comp[j]\n for j in self.config.property_package.component_list\n )\n + b.flow_mass_gas_extraction[t]\n )\n\n # Gas extraction\n @self.Constraint(self.flowsheet().time, doc=\"Gas extraction equation\")\n def mass_gas_extraction_equation(b, t):\n return b.flow_mass_gas_extraction[t] == b.gas_mass_influent_ratio[t] * sum(\n b.properties_in[t].flow_mass_comp[j]\n for j in self.config.property_package.component_list\n )\n\n # Solute removal\n @self.Constraint(\n self.flowsheet().time,\n self.config.property_package.solute_set,\n doc=\"Solute removal equations\",\n )\n def solute_removal_equation(b, t, j):\n return (\n b.removal_frac_mass_comp[t, j] * b.properties_in[t].flow_mass_comp[j]\n == b.properties_byproduct[t].flow_mass_comp[j]\n )\n\n # Solute concentration of treated stream\n @self.Constraint(\n self.flowsheet().time,\n self.config.property_package.solute_set,\n doc=\"Constraint for solute concentration in treated \" \"stream.\",\n )\n def solute_treated_equation(b, t, j):\n return (1 - b.removal_frac_mass_comp[t, j]) * b.properties_in[\n t\n ].flow_mass_comp[j] == b.properties_treated[t].flow_mass_comp[j]\n\n self._stream_table_dict = {\n \"Inlet\": self.inlet,\n \"Treated\": self.treated,\n \"Byproduct\": self.byproduct,\n }\n\n self._perf_var_dict[\"Water Recovery\"] = self.recovery_frac_mass_H2O\n self._perf_var_dict[\"Solute Removal\"] = self.removal_frac_mass_comp\n\n self._get_Q = MethodType(_get_Q_gas_extraction, self)\n\n self._Q = Reference(self.properties_in[:].flow_vol)\n pump_electricity(self, self._Q)\n\n # no costing method\n @property\n def default_costing_method(self):\n return lambda *args, **kwargs: None\n\n\ndef 
calculate_scaling_factors_gas_extraction(self):\n # Get default scale factors and do calculations from base classes\n for t, v in self.water_recovery_equation.items():\n iscale.constraint_scaling_transform(\n v,\n iscale.get_scaling_factor(\n self.properties_in[t].flow_mass_comp[\"H2O\"],\n default=1,\n warning=True,\n hint=\" for water recovery\",\n ),\n )\n\n for t, v in self.mass_balance.items():\n iscale.constraint_scaling_transform(\n v,\n iscale.get_scaling_factor(\n self.properties_in[t].flow_mass_comp[\"H2O\"], default=1, warning=False\n ),\n ) # would just be a duplicate of above\n\n for (t, j), v in self.solute_removal_equation.items():\n iscale.constraint_scaling_transform(\n v,\n iscale.get_scaling_factor(\n self.properties_in[t].flow_mass_comp[j],\n default=1,\n warning=True,\n hint=\" for solute removal\",\n ),\n )\n\n for (t, j), v in self.solute_treated_equation.items():\n iscale.constraint_scaling_transform(\n v,\n iscale.get_scaling_factor(\n self.properties_in[t].flow_mass_comp[j], default=1, warning=False\n ),\n ) # would just be a duplicate of above\n for t, v in self.mass_gas_extraction_equation.items():\n iscale.constraint_scaling_transform(v, 1e3)\n\n\ndef _get_Q_gas_extraction(self, t):\n return self.properties_in[t].flow_vol\n","sub_path":"watertap/unit_models/zero_order/gas_sparged_membrane_zo.py","file_name":"gas_sparged_membrane_zo.py","file_ext":"py","file_size_in_byte":9923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"587679166","text":"\"\"\"\nGiven an array of integers arr and an integer k.\n\nA value arr[i] is said to be stronger than a value arr[j] if |arr[i] - m| > |arr[j] - m| where m is the median of the array.\nIf |arr[i] - m| == |arr[j] - m|, then arr[i] is said to be stronger than arr[j] if arr[i] > arr[j].\n\nReturn a list of the strongest k values in the array. return the answer in any arbitrary order.\n\nMedian is the middle value in an ordered integer list. More formally, if the length of the list is n, the median is the element in position ((n - 1) / 2) in the sorted list (0-indexed).\n\nFor arr = [6, -3, 7, 2, 11], n = 5 and the median is obtained by sorting the array arr = [-3, 2, 6, 7, 11] and the median is arr[m] where m = ((5 - 1) / 2) = 2. The median is 6.\nFor arr = [-7, 22, 17, 3], n = 4 and the median is obtained by sorting the array arr = [-7, 3, 17, 22] and the median is arr[m] where m = ((4 - 1) / 2) = 1. The median is 3.\n\n\nExample 1:\n Input: arr = [1,2,3,4,5], k = 2\n Output: [5,1]\n Explanation: Median is 3, the elements of the array sorted by the strongest are [5,1,4,2,3]. The strongest 2 elements are [5, 1]. [1, 5] is also accepted answer.\n Please note that although |5 - 3| == |1 - 3| but 5 is stronger than 1 because 5 > 1.\n\nExample 2:\n Input: arr = [1,1,3,5,5], k = 2\n Output: [5,5]\n Explanation: Median is 3, the elements of the array sorted by the strongest are [5,5,1,1,3]. 
The strongest 2 elements are [5, 5].\n\nExample 3:\n Input: arr = [6,7,11,7,6,8], k = 5\n Output: [11,8,6,6,7]\n Explanation: Median is 7, the elements of the array sorted by the strongest are [11,8,6,6,7,7].\n Any permutation of [11,8,6,6,7] is accepted.\n\nExample 4:\n Input: arr = [6,-3,7,2,11], k = 3\n Output: [-3,11,2]\n\nExample 5:\n Input: arr = [-7,22,17,3], k = 2\n Output: [22,17]\n\nConstraints:\n 1 <= arr.length <= 10^5\n -10^5 <= arr[i] <= 10^5\n 1 <= k <= arr.length\n\"\"\"\n\n# Time limit Exceeded\ndef getStrongest(arr, k):\n median = sorted(arr)[(len(arr)-1)//2]\n\n def isStronger(num1, num2, median):\n if abs(num1 - median) > abs(num2 - median):\n return True\n elif abs(num1 - median) == abs(num2 - median):\n if num1 > num2:\n return True\n else:\n return False\n else:\n return False\n\n l = len(arr)\n j = k\n while j > 0:\n is_swapped = False\n for i in range(l - 1):\n if isStronger(arr[i], arr[i + 1], median):\n temp = arr[i]\n arr[i] = arr[i + 1]\n arr[i + 1] = temp\n is_swapped = True\n j -= 1\n l -= 1\n if is_swapped == False:\n break\n arr.reverse()\n return arr[:k]\n\n# Time Limit Exceeded\ndef getStrongest2(arr, k):\n median = sorted(arr)[(len(arr)-1)//2]\n def isStronger(num1, num2, median):\n if abs(num1 - median) > abs(num2 - median):\n return True\n elif abs(num1 - median) == abs(num2 - median):\n if num1 > num2:\n return True\n else:\n return False\n else:\n return False\n result = list()\n for i in range(len(arr)):\n is_changed = False\n for j in range(len(result)):\n if arr[i] == result[j] or isStronger(arr[i], result[j], median):\n result.insert(j, arr[i])\n is_changed = True\n break\n if len(result) < k and not is_changed:\n result.append(arr[i])\n if len(result) > k:\n del result[-1]\n return result\n\narr = [1,2,3,4,5]\nk = 2\nprint(getStrongest2(arr, k))\n\narr = [1,1,3,5,5]\nk = 2\nprint(getStrongest2(arr, k))\n\narr = [6,7,11,7,6,8]\nk = 5\nprint(getStrongest2(arr, k))\n\narr = [6,-3,7,2,11]\nk = 3\nprint(getStrongest2(arr, k))\n\narr = [-7,22,17,3]\nk = 2\nprint(getStrongest2(arr, k))\n\narr = [-7,22,17,3]\nk = 2\nprint(getStrongest2(arr, k))\n\narr = [-493,598,-262,-918,-76,-532,521]\nk = 7\nprint(getStrongest2(arr, k))\n\n","sub_path":"LeetCode-Python/1471 The k Strongest Values in an Array.py","file_name":"1471 The k Strongest Values in an Array.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"499020984","text":"#July 28th, Yinchao He\n\n#loop through the list, and print a greeting to each user\nusernames = []\nif usernames:\n\tfor username in usernames:\n\t\tif username == 'admin':\n\t\t\tprint(\"Hello admin, would you like to see a status report?\")\n\t\telse:\n\t\t\tprint(\"thank you for logging in again\")\nelse:\n\tprint(\"We need to find some users!\")\n\n#make sure everyone has a unique username\ncurrent_users = ['Yinchao', 'Bowen', 'John', 'Jerry']\nnew_users = ['Yinchao', 'BOWen', 'Tom', 'Mat']\nfor new_user in new_users:\n\tcheck_mark = 1\n\tfor current_user in current_users:\n\t\tif new_user.lower() == current_user.lower():\n\t\t\tprint(\"the username is not available\")\n\t\t\tcheck_mark = 0\n\t\t\tcontinue\n\tif check_mark == 1:\n\t\tprint(\"the username is available\")\n\n#output ordinal numbers\nnumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]\nfor number in numbers:\n\tif number == 1:\n\t\tprint(\"1st\")\n\telif number == 2:\n\t\tprint(\"2nd\")\n\telif number == 3:\n\t\tprint(\"3rd\")\n\telse:\n\t\tprint(str(number) + 
\"th\")\n\n","sub_path":"chapter5_if_statement/if_statement_list.py","file_name":"if_statement_list.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"196981249","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of AceQL Python Client SDK.\n# AceQL Python Client SDK: Remote SQL access over HTTP with AceQL HTTP.\n# Copyright (C) 2020, KawanSoft SAS\n# (http://www.kawansoft.com). All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##\n\nimport aceql\nfrom aceql import *\n\nimport unittest\nimport sys\nimport os\nfrom os import sep\nfrom datetime import datetime, date\n\n\nclass TestAll(unittest.TestCase):\n def test_A(self):\n print(sys.version)\n # assert sys.version_info >= (2,5)\n print()\n\n print(\"aceql.apilevel : \" + aceql.apilevel)\n print(\"aceql.threadsafety: \" + str(aceql.threadsafety))\n print(\"aceql.paramstyle : \" + aceql.paramstyle)\n\n proxies = None\n auth = None\n\n use_proxy = False\n if use_proxy:\n proxies = {\n \"http\": \"http://localhost:8080\",\n }\n\n auth = TestAll.getProxyAuth()\n\n localhost = \"http://localhost:9090/aceql\"\n server_host = \"https://www.aceql.com:9443/aceql\"\n server_host_no_ssl = \"http://www.aceql.com:9090/aceql\"\n\n host = localhost\n database = \"sampledb\"\n username= \"user1\"\n\n password= \"password1\"\n session_id = None\n #password= None\n #session_id = \"3j7aoduuekz3r8qdzyb2lc9plj\"\n\n Connection.set_timeout(10)\n Connection.set_stateless(False)\n connection = aceql.connect(host, database, username, password, session_id, proxies=proxies, auth=auth)\n connection.set_gzip_result(True)\n\n print()\n print(\"aceql version: \" + connection.get_client_version())\n print()\n\n connection.set_holdability(\"hold_cursors_over_commit\")\n holdability = connection.get_holdability()\n print(\"holdability: \" + holdability)\n self.assertEqual(holdability, u\"hold_cursors_over_commit\")\n\n connection.set_holdability(\"close_cursors_at_commit\")\n holdability = connection.get_holdability()\n print(\"holdability: \" + holdability)\n self.assertEqual(holdability, u\"close_cursors_at_commit\")\n\n connection.set_auto_commit(True)\n auto_commit = connection.get_auto_commit()\n print(\"auto_commit: \" + str(auto_commit))\n self.assertEqual(auto_commit, True)\n\n connection.set_auto_commit(False)\n auto_commit = connection.get_auto_commit()\n print(\"auto_commit: \" + str(auto_commit))\n self.assertEqual(auto_commit, False)\n\n cursor = connection.cursor()\n\n print(\"Before delete all orderlog\")\n sql = \"delete from orderlog where customer_id >= ?\"\n params = (0,)\n cursor.execute(sql, params)\n\n # customer_id integer NOT NULL,\n # item_id integer NOT NULL,\n # description character varying(64) NOT NULL,\n # cost_price numeric,\n # date_placed date NOT NULL,\n # date_shipped timestamp without time zone,\n # jpeg_image oid,\n # is_delivered numeric,\n # quantity integer NOT NULL,\n\n connection.commit()\n\n the_date = 
date(2017, 11, 3)\n cpt = 0\n filename = os.getcwd() + sep + \"files\" + sep + \"AceQL-Schema.png\"\n statinfo = os.stat(filename)\n the_length = statinfo.st_size * 3\n\n while True:\n\n progress_indicator = ProgressIndicator()\n connection.set_progress_indicator(progress_indicator)\n\n fd = open(filename, \"rb\")\n blob_tuple = (fd, the_length)\n\n sql = \"insert into orderlog values (?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n\n do_use_blob = True\n if not do_use_blob:\n blob_tuple = (None, SqlNullType.BLOB)\n print(\"NULL BLOB INSERT\")\n\n theFloat = float((cpt * 1000) + 44.44)\n print(\"theFloat: \" + str(theFloat))\n params = (cpt, cpt, u\"intitulé_\" + str(cpt), theFloat,\n the_date, datetime.now(), blob_tuple, 1, cpt * 1000)\n print(\"insert: \" + str(params))\n cursor.execute(sql, params)\n cpt += 1\n\n if cpt >= 1:\n break\n\n connection.commit()\n\n sql = \"select * from orderlog where customer_id >= ? order by customer_id\"\n params = (0,)\n cursor.execute(sql, params)\n print(\"cursor.rowcount : \" + str(cursor.rowcount))\n print(\"cursor.description: \" + str(cursor.description))\n\n do_fetch_many = True\n if do_fetch_many:\n rows = cursor.fetchmany(1)\n\n print(\"fetchmany:\")\n for row in rows:\n print(row)\n print()\n\n do_fetch_all = True\n if do_fetch_all:\n rows = cursor.fetchall()\n\n print(\"fetchall:\")\n for row in rows:\n print(row)\n print()\n\n connection.commit()\n\n cursor.close()\n cursor = connection.cursor()\n\n sql = \"select * from orderlog where customer_id >= ? order by customer_id\"\n params = (0,)\n cursor.execute(sql, params)\n print(\"cursor.rowcount : \" + str(cursor.rowcount))\n\n description = cursor.description\n print(\"len(description): \" + str(len(description)))\n print(\"cursor.description: \")\n\n for the_col_desc in description:\n print(the_col_desc)\n\n connection.commit()\n\n print()\n cpt = 0\n while True:\n row = cursor.fetchone()\n if row is None:\n break\n print(row)\n\n # 6 is is the index of BLOB in the row\n total_length = cursor.get_blob_length(6)\n print(\"total_length: \" + str(total_length))\n\n cpt += 1\n # print(\"BLOB length : \" + str(total_length))\n filename = os.path.expanduser(\"~\") + sep + \"AceQL-Schema_OUT_\" + str(cpt) + \".png\"\n response = cursor.get_blob_stream(6)\n\n with open(filename, 'wb') as fd:\n for chunk in response.iter_content(chunk_size=2048):\n fd.write(chunk)\n\n cursor.close()\n\n connection.close()\n connection2 = aceql.connect(host, \"sampledb\", \"user1\", \"password1\", proxies=proxies, auth=auth)\n print(\"connection2.get_auto_commit(): \" + str(connection2.get_auto_commit()))\n print()\n connection2.logout()\n print()\n\n\n @staticmethod\n def getProxyAuth():\n \"\"\"Get proxy auth info from a filename\"\"\"\n with open(\"I:\\\\neotunnel.txt\", \"rt\") as fd:\n content = fd.read()\n lines = content.split()\n auth = ProxyAuth(lines[0].strip(), lines[1].strip())\n return auth\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_orderlog.py","file_name":"test_orderlog.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"140202027","text":"import json\nimport os\nimport sqlite3\nfrom mitmproxy import ctx\n\n\n\ndb_path = 'igetget.sqlite'\n\nif not os.path.exists(db_path):\n conn = sqlite3.connect(db_path)\n # 获取sqlite3.Cursor对象\n c = conn.cursor()\n # 创建persons表\n c.execute('''CREATE TABLE course_list\n (\n name CHAR(50),\n intro CHAR(50) ,\n lecturer_name CHAR(100) ,\n lecturer_title 
CHAR(50) ,\n price INT,\n phase_num INT,\n learn_user_count INT\n );''')\n\n conn.commit()\n # 关闭数据库连接\n conn.close()\n print('创建数据库成功')\nconn = sqlite3.connect(db_path)\nc = conn.cursor()\n\ndef response(flow):\n\n url = 'https://entree.igetget.com/bauhinia/h5/college/course'\n if flow.request.url.startswith(url):\n text = flow.response.text\n data = json.loads(text)\n courses = data.get('c').get('list')\n for course in courses:\n data = [\n course.get('name'),\n course.get('intro'),\n course.get('lecturer_name'),\n course.get('lecturer_title'),\n course.get('price'),\n course.get('phase_num'),\n course.get('learn_user_count')\n\n ]\n ctx.log.info(str(data))\n\n c.execute('INSERT INTO course_list VALUES(?,?,?,?,?,?,?)', data)\n conn.commit()\n","sub_path":"projects/get_courses/get_courses_list.py","file_name":"get_courses_list.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"346185410","text":"import serial\nimport struct\nimport time\nfrom pprint import pprint\nfrom datetime import datetime, timedelta\nimport numpy as np\nfrom tqdm import tqdm\n\nS = 0\nP = 1\n\n\ndef request_parameter(type_=S, set_=0, number=44):\n word = number\n word |= set_ << 12\n word |= type_ << 15\n lsb, msb = struct.pack(\" S-0-0044\n 0x00, # /\\\n ])\n\n L = len(b)\n b[2] = L - 8\n b[3] = L - 8\n b[1] = (-1 * sum(b)) & 0xFF # checksum\n\n assert (sum(b) & 0xFF) == 0\n\n return b\n\n assert request_wichtungsart_speed() == request_parameter(S, 0, 44)\n","sub_path":"py/sis/read_azimuth.py","file_name":"read_azimuth.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"528748694","text":"# -*- coding: utf-8 -*-\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nimport logging\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\ndef my_function():\n wiki_news = open('reduce_zhiwiki.txt', 'r',encoding='utf-8')\n #LineSentence加载语料 Word2Vec训练语料\n model = Word2Vec(LineSentence(wiki_news), sg=0,size=800, window=12, min_count=3, workers=8)\n #model = Word2Vec(LineSentence(wiki_news))\n model.save('zhiwiki_news.word2vec')\n model.wv.save_word2vec_format('word2vec_format.txt')\n\nif __name__ == '__main__':\n my_function()\n","sub_path":"embedding_1/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"112944729","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom board import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^post/(?P\\d+)/$', views.post_detail,name='post_detail'),\n url(r'^post/(?P\\d+)/like/$', views.post_like, name='post_like'),\n url(r'^post/(?P\\d+)/down/$', views.post_down, name='post_down'),\n url(r'^post/new/$', views.post_new, name='post_new'),\n #url(r'^post/(?P\\d+)/$', views.post_detail, name='post_detail'),\n url(r'^post/(?P\\d+)/edit/$', views.post_edit, name='post_edit'),\n url(r'^post/(?P\\d+)/delete/$', views.post_delete, name='post_delete'),\n url(r'^post/(?P\\d+)/comment/new/$', views.comment_new, name='comment_new'),\n url(r'^post/(?P\\d+)/comment/(?P\\d+)/edit/$', views.comment_edit, name='comment_edit'),\n url(r'^post/(?P\\d+)/comment/(?P\\d+)/delete/$', views.comment_delete, 
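# The serial-protocol record above builds frames whose bytes must sum to 0 mod 256:
# the checksum byte is the two's-complement of the running byte sum, which is what its
# `assert (sum(b) & 0xFF) == 0` verifies. A minimal, self-contained sketch of that
# property (the frame bytes below are illustrative only):
frame = bytearray([0x02, 0x00, 0x05, 0x05])
frame.append((-sum(frame)) & 0xFF)  # checksum = two's-complement of the byte sum
assert sum(frame) % 256 == 0        # receiver-side validation passes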
name='comment_delete'),\n]\n","sub_path":"udecide/board/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"626855145","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom shoppy.shop.models import Order\n\nclass Account(models.Model):\n user = models.ForeignKey(User, unique=True)\n total_current = models.PositiveIntegerField()\n total_lifetime = models.PositiveIntegerField()\n\nclass TransferOrder(models.Model):\n order = models.ForeignKey('shop.Order')\n transfer = models.ForeignKey('Transfer')\n\nclass Transfer(models.Model):\n account = models.ForeignKey('Account')\n amount = models.PositiveIntegerField()\n date = models.DateTimeField(auto_now_add=True)\n subject = models.CharField(max_length=255, null=True, blank=True)\n isPositive = models.BooleanField()\n isActive = models.BooleanField()\n\n\n","sub_path":"shoppy/points/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"486910416","text":"import mock\nimport datetime\n\nfrom modularodm import Q\nfrom nose.tools import * # flake8: noqa (PEP8 asserts)\n\nfrom framework import auth\nfrom framework.auth import exceptions\nfrom framework.exceptions import PermissionsError\nfrom website import models\nfrom tests import base\nfrom tests.base import fake\nfrom tests import factories\nfrom framework.tasks import handlers\n\n\nclass TestUser(base.OsfTestCase):\n def setUp(self):\n super(TestUser, self).setUp()\n self.user = factories.AuthUserFactory()\n\n def tearDown(self):\n models.Node.remove()\n models.User.remove()\n models.Session.remove()\n super(TestUser, self).tearDown()\n\n # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2454\n def test_add_unconfirmed_email_when_email_verifications_is_None(self):\n\n self.user.email_verifications = None\n self.user.save()\n email = fake.email()\n self.user.add_unconfirmed_email(email)\n self.user.save()\n assert_in(email, self.user.unconfirmed_emails)\n\n def test_unconfirmed_emails(self):\n assert_equal(\n self.user.unconfirmed_emails,\n []\n )\n self.user.add_unconfirmed_email('foo@bar.com')\n assert_equal(\n self.user.unconfirmed_emails,\n ['foo@bar.com']\n )\n\n # email_verifications field may be None\n self.user.email_verifications = None\n self.user.save()\n assert_equal(self.user.unconfirmed_emails, [])\n\n def test_unconfirmed_emails_unregistered_user(self):\n\n assert_equal(\n factories.UnregUserFactory().unconfirmed_emails,\n []\n )\n\n def test_unconfirmed_emails_unconfirmed_user(self):\n user = factories.UnconfirmedUserFactory()\n\n assert_equal(\n user.unconfirmed_emails,\n [user.username]\n )\n\n def test_remove_unconfirmed_email(self):\n self.user.add_unconfirmed_email('foo@bar.com')\n self.user.save()\n\n assert_in('foo@bar.com', self.user.unconfirmed_emails) # sanity check\n\n self.user.remove_unconfirmed_email('foo@bar.com')\n self.user.save()\n\n assert_not_in('foo@bar.com', self.user.unconfirmed_emails)\n\n def test_confirm_email(self):\n token = self.user.add_unconfirmed_email('foo@bar.com')\n self.user.confirm_email(token)\n\n assert_not_in('foo@bar.com', self.user.unconfirmed_emails)\n assert_in('foo@bar.com', self.user.emails)\n\n def test_confirm_email_comparison_is_case_insensitive(self):\n u = factories.UnconfirmedUserFactory.build(\n 
username='letsgettacos@lgt.com'\n )\n u.add_unconfirmed_email('LetsGetTacos@LGT.com')\n u.save()\n assert_false(u.is_confirmed) # sanity check\n\n token = u.get_confirmation_token('LetsGetTacos@LGT.com')\n\n confirmed = u.confirm_email(token)\n assert_true(confirmed)\n assert_true(u.is_confirmed)\n\n def test_cannot_remove_primary_email_from_email_list(self):\n with assert_raises(PermissionsError) as e:\n self.user.remove_email(self.user.username)\n assert_equal(e.exception.message, \"Can't remove primary email\")\n\n def test_add_same_unconfirmed_email_twice(self):\n email = \"test@example.com\"\n token1 = self.user.add_unconfirmed_email(email)\n self.user.save()\n self.user.reload()\n assert_equal(token1, self.user.get_confirmation_token(email))\n assert_equal(email, self.user._get_unconfirmed_email_for_token(token1))\n\n token2 = self.user.add_unconfirmed_email(email)\n self.user.save()\n self.user.reload()\n assert_not_equal(token1, self.user.get_confirmation_token(email))\n assert_equal(token2, self.user.get_confirmation_token(email))\n assert_equal(email, self.user._get_unconfirmed_email_for_token(token2))\n with assert_raises(exceptions.InvalidTokenError):\n self.user._get_unconfirmed_email_for_token(token1)\n\n\nclass TestUserMerging(base.OsfTestCase):\n ADDONS_UNDER_TEST = {\n 'unmergeable': {\n 'user_settings': factories.MockAddonUserSettings,\n 'node_settings': factories.MockAddonNodeSettings,\n },\n 'mergeable': {\n 'user_settings': factories.MockAddonUserSettingsMergeable,\n 'node_settings': factories.MockAddonNodeSettings,\n }\n }\n\n def setUp(self):\n super(TestUserMerging, self).setUp()\n self.user = factories.UserFactory()\n with self.context:\n handlers.celery_before_request()\n\n def _add_unconfirmed_user(self):\n\n self.unconfirmed = factories.UnconfirmedUserFactory()\n\n self.user.system_tags = ['shared', 'user']\n self.unconfirmed.system_tags = ['shared', 'unconfirmed']\n\n def _add_unregistered_user(self):\n self.unregistered = factories.UnregUserFactory()\n\n self.project_with_unreg_contrib = factories.ProjectFactory()\n self.project_with_unreg_contrib.add_unregistered_contributor(\n fullname='Unreg',\n email=self.unregistered.username,\n auth=auth.Auth(self.project_with_unreg_contrib.creator)\n )\n self.project_with_unreg_contrib.save()\n\n def test_can_be_merged_no_addons(self):\n # No addons present\n assert_true(self.user.can_be_merged)\n\n def test_can_be_merged_unmergable_addon(self):\n self.user.add_addon('unmergeable')\n\n assert_false(self.user.can_be_merged)\n\n def test_can_be_merged_mergable_addon(self):\n self.user.add_addon('mergeable')\n\n assert_true(self.user.can_be_merged)\n\n def test_can_be_merged_both_addons(self):\n self.user.add_addon('mergeable')\n self.user.add_addon('unmergeable')\n\n assert_false(self.user.can_be_merged)\n\n def test_merge_unconfirmed_into_unmergeable(self):\n self.user.add_addon('unmergeable')\n self.user.save()\n # sanity check\n assert_false(self.user.can_be_merged)\n\n unconf = factories.UnconfirmedUserFactory()\n # make sure this doesn't raise an exception\n self.user.merge_user(unconf)\n\n unreg = factories.UnregUserFactory()\n # make sure this doesn't raise an exception\n self.user.merge_user(unreg)\n\n def test_merge_unmergeable_into_mergeable(self):\n # These states should never happen in the current codebase...\n # but that's why we have tests.\n unconfirmed = factories.UnconfirmedUserFactory()\n unconfirmed.add_addon('unmergeable')\n\n with assert_raises(exceptions.MergeConflictError):\n 
self.user.merge_user(unconfirmed)\n\n unregistered = factories.UnregUserFactory()\n unregistered.add_addon('unmergeable')\n\n with assert_raises(exceptions.MergeConflictError):\n self.user.merge_user(unregistered)\n\n def test_merge_unmergeabled_into_unmergeable(self):\n self.user.add_addon('unmergeable')\n # These states should never happen in the current codebase...\n # but that's why we have tests.\n unconfirmed = factories.UnconfirmedUserFactory()\n unconfirmed.add_addon('unmergeable')\n\n with assert_raises(exceptions.MergeConflictError):\n self.user.merge_user(unconfirmed)\n\n unregistered = factories.UnregUserFactory()\n unregistered.add_addon('unmergeable')\n\n with assert_raises(exceptions.MergeConflictError):\n self.user.merge_user(unregistered)\n\n @mock.patch('website.mailchimp_utils.get_mailchimp_api')\n def test_merge(self, mock_get_mailchimp_api):\n other_user = factories.UserFactory()\n other_user.save()\n\n # define values for users' fields\n today = datetime.datetime.now()\n yesterday = today - datetime.timedelta(days=1)\n\n self.user.comments_viewed_timestamp['shared_gt'] = today\n other_user.comments_viewed_timestamp['shared_gt'] = yesterday\n self.user.comments_viewed_timestamp['shared_lt'] = yesterday\n other_user.comments_viewed_timestamp['shared_lt'] = today\n self.user.comments_viewed_timestamp['user'] = yesterday\n other_user.comments_viewed_timestamp['other'] = yesterday\n\n self.user.email_verifications = {'user': {'email': 'a'}}\n other_user.email_verifications = {'other': {'email': 'b'}}\n\n self.user.external_accounts = [factories.ExternalAccountFactory()]\n other_user.external_accounts = [factories.ExternalAccountFactory()]\n\n self.user.mailing_lists = {\n 'user': True,\n 'shared_gt': True,\n 'shared_lt': False,\n }\n other_user.mailing_lists = {\n 'other': True,\n 'shared_gt': False,\n 'shared_lt': True,\n }\n\n self.user.piwik_token = 'abc'\n other_user.piwik_token = 'def'\n\n self.user.security_messages = {\n 'user': today,\n 'shared': today,\n }\n other_user.security_messages = {\n 'other': today,\n 'shared': today,\n }\n\n self.user.system_tags = ['user', 'shared']\n other_user.system_tags = ['other', 'shared']\n\n self.user.watched = [factories.WatchConfigFactory()]\n other_user.watched = [factories.WatchConfigFactory()]\n\n self.user.save()\n other_user.save()\n\n # define expected behavior for ALL FIELDS of the User object\n default_to_master_user_fields = [\n '_id',\n 'date_confirmed',\n 'date_disabled',\n 'date_last_login',\n 'date_registered',\n 'family_name',\n 'fullname',\n 'given_name',\n 'is_claimed',\n 'is_invited',\n 'is_registered',\n 'jobs',\n 'locale',\n 'merged_by',\n 'middle_names',\n 'password',\n 'piwik_token',\n 'recently_added',\n 'schools',\n 'social',\n 'suffix',\n 'timezone',\n 'username',\n 'verification_key',\n ]\n\n calculated_fields = {\n 'comments_viewed_timestamp': {\n 'user': yesterday,\n 'other': yesterday,\n 'shared_gt': today,\n 'shared_lt': today,\n },\n 'email_verifications': {\n 'user': {'email': 'a'},\n 'other': {'email': 'b'},\n },\n 'emails': [\n self.user.username,\n other_user.username,\n ],\n 'external_accounts': [\n self.user.external_accounts[0]._id,\n other_user.external_accounts[0]._id,\n ],\n 'mailing_lists': {\n 'user': True,\n 'other': True,\n 'shared_gt': True,\n 'shared_lt': True,\n },\n 'security_messages': {\n 'user': today,\n 'other': today,\n 'shared': today,\n },\n 'system_tags': ['user', 'shared', 'other'],\n 'unclaimed_records': {},\n 'watched': [\n self.user.watched[0]._id,\n 
other_user.watched[0]._id,\n ],\n }\n\n # from the explicit rules above, compile expected field/value pairs\n expected = {}\n expected.update(calculated_fields)\n for key in default_to_master_user_fields:\n expected[key] = getattr(self.user, key)\n\n # ensure all fields of the user object have an explicit expectation\n assert_equal(\n set(expected.keys()),\n set(self.user._fields),\n )\n\n # mock mailchimp\n mock_client = mock.MagicMock()\n mock_get_mailchimp_api.return_value = mock_client\n mock_client.lists.list.return_value = {'data': [{'id': x, 'list_name': list_name} for x, list_name in enumerate(self.user.mailing_lists)]}\n\n # perform the merge\n self.user.merge_user(other_user)\n self.user.save()\n handlers.celery_teardown_request()\n\n # check each field/value pair\n for k, v in expected.iteritems():\n assert_equal(\n getattr(self.user, k),\n v,\n # \"{} doesn't match expectation\".format(k)\n )\n\n # check fields set on merged user\n assert_equal(other_user.merged_by, self.user)\n\n assert_equal(\n 0,\n models.Session.find(\n Q('data.auth_user_id', 'eq', other_user._id)\n ).count()\n )\n\n def test_merge_unconfirmed(self):\n self._add_unconfirmed_user()\n unconfirmed_username = self.unconfirmed.username\n self.user.merge_user(self.unconfirmed)\n\n assert_true(self.unconfirmed.is_merged)\n assert_equal(self.unconfirmed.merged_by, self.user)\n\n assert_true(self.user.is_claimed)\n assert_false(self.user.is_invited)\n\n # TODO: test profile fields - jobs, schools, social\n # TODO: test security_messages\n # TODO: test mailing_lists\n\n assert_equal(self.user.system_tags, ['shared', 'user', 'unconfirmed'])\n\n # TODO: test emails\n # TODO: test watched\n # TODO: test external_accounts\n\n assert_equal(self.unconfirmed.email_verifications, {})\n assert_is_none(self.unconfirmed.username)\n assert_is_none(self.unconfirmed.password)\n assert_is_none(self.unconfirmed.verification_key)\n # The mergee's email no longer needs to be confirmed by merger\n unconfirmed_emails = [record['email'] for record in self.user.email_verifications.values()]\n assert_not_in(unconfirmed_username, unconfirmed_emails)\n\n def test_merge_unregistered(self):\n # test only those behaviors that are not tested with unconfirmed users\n self._add_unregistered_user()\n\n self.user.merge_user(self.unregistered)\n\n assert_true(self.user.is_invited)\n\n assert_in(self.user, self.project_with_unreg_contrib.contributors)\n\n","sub_path":"tests/models/test_user.py","file_name":"test_user.py","file_ext":"py","file_size_in_byte":14089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"459673417","text":"#! 
/usr/bin/env python3\n\n# Problem 99 - Largest exponential\n#\n# Comparing two numbers written in index form like 2^11 and 3^7 is not\n# difficult, as any calculator would confirm that 2^11 = 2048 < 3^7 = 2187.\n#\n# However, confirming that 632382^518061 > 519432^525806 would be much more\n# difficult, as both numbers contain over three million digits.\n#\n# Using base_exp.txt (right click and 'Save Link/Target As...'), a 22K text\n# file containing one thousand lines with a base/exponent pair on each line,\n# determine which line number has the greatest numerical value.\n#\n# NOTE: The first two lines in the file represent the numbers in the example\n# given above.\n\nimport math\nimport unittest\n\nfrom util import *\n\ndef maxExp(a, b, c, d):\n    return (a, b) if b * math.log(a) > d * math.log(c) else (c, d)\n\nclass Test(unittest.TestCase):\n    def test_problem099(self):\n        with open('problem099-base_exp.txt') as fh:\n            maxA, maxB, maxLine = 1, 0, 0\n            exps = [[int(n) for n in ln.strip().split(',')] for ln in fh]\n            for i, (a, b) in enumerate(exps):\n                maxA, maxB = maxExp(a, b, maxA, maxB)\n                if (maxA, maxB) == (a, b):\n                    maxLine = i + 1\n        self.assertEqual((maxA, maxB), (895447, 504922))\n        self.assertEqual(maxLine, 709)\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"problem099.py","file_name":"problem099.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"382824514","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 20 18:10:25 2017\n\n@author: gollivie\n\"\"\"\n\npath2rmsfile = \"C:\\\\Users\\\\gollivie\\\\rms_vis\\\\16490134_RMSWC.txt\"\n\noutput = open(\"C:\\\\Users\\\\gollivie\\\\rms_vis\\\\16490134_RMSWC_nwsp.txt\",\"w\")\n\nwith open(path2rmsfile,\"r\") as f:\n    for line in f:\n        output.write(line.strip()+\"\\n\")\noutput.close()","sub_path":"remove_trailing_whitespace.py","file_name":"remove_trailing_whitespace.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"617066408","text":"\n\n#class header\nclass _TRANSPORT():\n\tdef __init__(self,): \n\t\tself.name = \"TRANSPORT\"\n\t\tself.definitions = [u'to take goods or people from one place to another: ', u'(in the past) to send a criminal to live in a country far away as a punishment: ', u'If something transports you to a different time or place, it makes you feel as if you were in it: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_transport.py","file_name":"_transport.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} {"seq_id":"280901619","text":"# Copyright 2017-2023 QuantRocket LLC - All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
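# Why the logarithm trick in the problem 99 record above works: for positive bases,
# a^b > c^d exactly when b*ln(a) > d*ln(c), so the million-digit powers are never
# materialized. A quick check against the worked example (2^11 = 2048 < 3^7 = 2187):
import math
assert 11 * math.log(2) < 7 * math.log(3)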
License.\n\nfrom quantrocket.exceptions import ParameterError\n\ndef _read_moonshot_or_pnl_csv(filepath_or_buffer):\n \"\"\"\n Load a Moonshot backtest CSV or PNL CSV into a DataFrame.\n\n This is a light wrapper around pd.read_csv that handles setting index\n columns and casting to proper data types.\n\n Parameters\n ----------\n filepath_or_buffer : string or file-like, required\n path to CSV\n\n Returns\n -------\n DataFrame\n a multi-index (Field, Date[, Time]) DataFrame of backtest or PNL\n results, with sids or strategy codes as columns\n \"\"\"\n try:\n import pandas as pd\n import numpy as np\n except ImportError:\n raise ImportError(\"pandas must be installed to use this function\")\n\n results = pd.read_csv(\n filepath_or_buffer,\n parse_dates=[\"Date\"],\n # columns can have mixed types, silence warning\n low_memory=False)\n index_cols = [\"Field\", \"Date\"]\n if \"Time\" in results.columns:\n index_cols.append(\"Time\")\n results = results.set_index(index_cols)\n\n fields_in_results = results.index.get_level_values(\"Field\").unique()\n\n # Cast to float\n float_fields = [\n \"Return\",\n \"Pnl\",\n \"Signal\",\n \"Weight\",\n \"NetExposure\",\n \"PositionQuantity\",\n \"PositionValue\",\n \"NetLiquidation\",\n \"Turnover\",\n \"Commission\",\n \"CommissionAmount\",\n \"Dividend\",\n \"Slippage\",\n \"Benchmark\",\n \"AbsWeight\",\n \"AbsExposure\",\n \"TotalHoldings\",\n \"Price\"\n\n ]\n for field in float_fields:\n if field in fields_in_results:\n results.loc[[field]] = results.loc[[field]].astype(np.float64)\n\n return results\n","sub_path":"quantrocket/utils/_parse.py","file_name":"_parse.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"312484136","text":"\"\"\"\nhttps://leetcode.com/problems/maximum-product-of-two-elements-in-an-array/submissions/\n\"\"\"\nimport heapq\ndef maxProduct(self, nums: List[int]) -> int:\n if not nums:\n return 0\n elif len(nums) == 2:\n return (nums[0]-1)*(nums[1]-1)\n else:\n heapq.heapify(nums) \n li = heapq.nlargest(2, nums)\n return (li[0]-1) * (li[1]-1)\n \n","sub_path":"python/maxproductoftwoelements.py","file_name":"maxproductoftwoelements.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"16177412","text":"import matplotlib.pyplot as plt\n\ndef expectation(v): # computes expectation of a sample\n e = 0\n for i in range(0, len(v)):\n e += v[i]\n return e/len(v)\n\ndef variance(v): # computes variance of a sample\n e = expectation(v)\n var = 0\n for i in range(0, len(v)):\n var += (v[i]-e)**2\n return var/len(v)\n\ndef ugraph(v): # plots a histogram (positive values)\n plt.hist(v,bins=1000,range=(0,50))\n plt.show()\n\ndef graph(v): # plots a histogram (negative and positive values)\n plt.hist(v,bins=1000,range=(-25,25))\n plt.show()\n\ndef uinfo(v):\n print(\"Expected value:\",end=\"\")\n print(expectation(v))\n print(\"Variance:\",end=\"\")\n print(variance(v))\n ugraph(v)\n\ndef info(v):\n print(\"Expected value:\",end=\"\")\n print(expectation(v))\n print(\"Variance:\",end=\"\")\n print(variance(v))\n graph(v)\n","sub_path":"src/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"47384629","text":"# simple function to create a dictionary using treeducken's settings flags as \n# keys and function input as 
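# In the maxProduct record above, heapq.nlargest accepts any iterable directly, so the
# heapify call and the two-element special case are unnecessary. A sketch of the shorter
# form (the function name is illustrative):
import heapq

def max_product(nums):
    a, b = heapq.nlargest(2, nums)
    return (a - 1) * (b - 1)

assert max_product([3, 4, 5, 2]) == 12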
value\nimport math\n\ndef calculate_combinations(n, k):\n n_fac = math.factorial(n)\n k_fac = math.factorial(k)\n n_k_fac = math.factorial(n-k)\n numer = math.log(n_fac)\n denom = math.log(k_fac) + math.log(n_k_fac)\n return(round(math.exp(numer - denom)))\n\ndef calculate_birth_rate(ntaxa, k, rel_ext, tre_dep):\n if(rel_ext != 0):\n sum_val_i = 0.0\n sum_val_j = 0.0\n birth_rate = ((k + 1) / tre_dep) * calculate_combinations(ntaxa, k + 1) * (-1)**k\n for i in range(0, ntaxa - k - 1):\n temp = calculate_combinations(ntaxa - k - 1, i)\n temp *= 1 / ((k + i + 1) * rel_ext)\n temp *= (1/rel_ext - 1)**(k + i)\n for j in range(1, k + i + 1):\n temp_j = calculate_combinations(k + i, j)\n temp_j *= ((-1)**j) / j\n temp_j *= (1 - (1 / (1 - rel_ext))**j)\n sum_val_j += temp_j\n sum_val_i += temp * (math.log(1 / (1 - rel_ext)) - sum_val_j)\n birth_rate *= sum_val_i\n \n else:\n sum_val = 0.0\n for i in range(k + 1, ntaxa + 1):\n sum_val += (1 / (i* tre_dep))\n birth_rate = abs(2.0 * sum_val)\n return(birth_rate)\n\ndef make_settings_dict(sbr, sdr, trr, nt, ipp, ne, reps, nl, ng, ogf, ts):\n settings_dict = {}\n key_list = [\n (\"-sbr\",sbr), (\"-sdr\",sdr), (\"-lgtr\",trr),\n (\"-ipp\",ipp), (\"-nt\",nt), (\"-r\",reps),\n (\"-ne\",ne), (\"-nl\",nl), (\"-ng\",ng), (\"-og\",ogf),\n (\"-sc\", ts)\n ]\n settings_dict = dict(key_list)\n return(settings_dict)\n\ndef main(): \n relative_extinction_rates = [0.0, 0.25, 0.5, 0.75]\n numtaxa = 3\n length_schemes = [5]\n species_birth_rates = []\n species_death_rates = []\n for i in range(0, len(relative_extinction_rates)):\n for j in range(0, len(length_schemes)):\n a = calculate_birth_rate(numtaxa, 1, relative_extinction_rates[i], length_schemes[j])\n print(a)\n species_birth_rates.append(a)\n if(i != 0):\n species_death_rates.append(a * relative_extinction_rates[i])\n else:\n species_death_rates.append(0.0)\n transfer_rates = [0.0, 0.5, 1.0]\n ipp = 1\n replicates = 100\n num_loci = 1\n num_genes = 50000\n popsize = 100000\n ipp = 1\n outgroup_fraction = 0.01\n file_name_end = \"_settings.txt\"\n print(\"birth rates: \")\n print(species_birth_rates)\n print(\"death rates: \")\n print(species_death_rates)\n transfer_rates.sort()\n for i in range(0,len(relative_extinction_rates)):\n for j in range(0, len(transfer_rates)):\n relative_ext = relative_extinction_rates[i]\n file_begin = \"\"\n # add the relative extinction rate as beginning of name\n file_begin += \"relExt\" + str(relative_ext) + \"_\"\n # add transfer rate to name\n file_begin += \"trRate\" + str(transfer_rates[j])\n file_begin += file_name_end\n settings_dict = make_settings_dict(species_birth_rates[i],\n species_death_rates[i],\n transfer_rates[j],\n numtaxa,\n ipp,\n popsize,\n replicates,\n num_loci,\n num_genes,\n outgroup_fraction,\n 0.0)\n file_handle = open(file_begin, \"w\")\n for key, value in settings_dict.items():\n file_handle.write(key + \" \" + str(value) + \"\\n\")\n file_handle.write(\"-sout 0\\n\")\n file_handle.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"code/introgresstest/set_settings.py","file_name":"set_settings.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"547442988","text":"print(\"_______________ F C F S _________________ \")\r\nt_count = int(input(\"Enter Total processes :--> \"))\r\narrival_time = [0 for i in range(t_count)]\r\nburst_time = [0 for j in range(t_count)]\r\nstart_time = [0 for k in range(t_count)]\r\nfinish_time = [0 for f in 
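# The log-factorial route in the set_settings.py record above keeps intermediates as
# small floats at the cost of rounding, hence the final round(). As a cross-check,
# math.comb (Python 3.8+) computes the same binomial coefficient exactly:
import math
assert math.comb(10, 3) == 120  # same value calculate_combinations(10, 3) returns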
range(t_count)]\r\nwait_time = [0 for w in range(t_count)]\r\nprocess_name = [0 for v in range(t_count)]\r\n\r\n\r\ndef take_input():\r\n for process in range(0, t_count):\r\n arrival_time[process] = int(input(\"Enter Arrival time of process %d :\" %(process+1)))\r\n burst_time[process] = int(input(\"Enter Burst time of process %d :\" %(process+1)))\r\n# _________________function to sort array _________________________________________________________\r\n\r\n\r\ndef smallest(array_to_sort):\r\n smallest = array_to_sort[0]\r\n for process in range(0, len(array_to_sort)):\r\n if smallest > array_to_sort [process]:\r\n smallest = array_to_sort[process]\r\n return smallest\r\n\r\n\r\ndef sort(array_to_sort):\r\n sorted_array = []\r\n count = len(array_to_sort)\r\n for count in range(count):\r\n small = smallest(array_to_sort)\r\n sorted_array.append(int(small))\r\n array_to_sort.remove(small)\r\n return sorted_array\r\n#__________________________________________________________________________________________________\r\n\r\n\r\ndef main():\r\n take_input()\r\n arrival_time_temp = [0 for a in range(t_count)]\r\n for x in range(0, len(arrival_time)):\r\n arrival_time_temp[x] = arrival_time[x]\r\n arrival_time_sorted = sort(arrival_time)\r\n burst_time_sorted = [] # sorted corrspondingly arrival time\r\n for i in range(0, len(arrival_time_sorted)):\r\n for j in range(0, len(arrival_time_temp)):\r\n if arrival_time_temp[j] == arrival_time_sorted[i]:\r\n burst_time_sorted.append(burst_time[j])\r\n process_name[i] = j+1\r\n# start time--------------------------------------------------------------------------------------------\r\n start_time[0] = arrival_time_sorted[0]\r\n for i in range(1, t_count):\r\n for j in range(0, i):\r\n start_time[i] = start_time[j] + burst_time_sorted[j]\r\n if arrival_time_sorted[i] > start_time[i]:\r\n start_time[i] += arrival_time_sorted[i] - start_time[i]\r\n#======================================================================================================\r\n# output session---------------------------------------------------------------------------------------\r\n print(\"\\n\\n\\n-----------------------------------------------------------------------------------------\")\r\n print(\"\\t\\t\\t\\t=process#===AT=====BT=====ST======FT======WT\")\r\n for counter in range(0, t_count):\r\n print(\"\\t\\t\\t\\t====\", process_name[counter], \"====\", arrival_time_sorted[counter], \"====\", burst_time_sorted[counter], \"====\", start_time[counter],\"====\" ,start_time[counter]+burst_time_sorted[counter],\"====\", start_time[counter]-arrival_time_sorted[counter])\r\n print(\"-----------------------------------------------------------------------------------------\")\r\nmain()\r\n","sub_path":"FC_FS.py","file_name":"FC_FS.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"545165819","text":"import re\nimport random\nfrom donica.encode_speech import Speech\n\nTITLE = ['SMALL-TALK']\n\n\"\"\" Used dialogflow's build in response, it was a bit slower in my opinion \"\"\"\n\n\ndef handle(title, message):\n speak = Speech()\n if re.search('info.donica.age', title, re.I):\n speak.send_speak('I could do the math but lets just say I am not allowed to drink in the U.S')\n if re.search('info.donica.birthday', title, re.I):\n speak.send_speak('My birthday is on May Fifth, 2018')\n if re.search('info.donica.info', title, re.I):\n speak.send_speak('I am a virtual assistant, not to be 
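# Note on the FCFS record above: it prints waiting time as start minus arrival but never
# fills its wait_time list. The textbook relations, as a standalone sketch with
# illustrative arrival/burst data:
arrival, burst = [0, 2, 4], [3, 3, 1]
start, wait = [], []
clock = 0
for a, b in zip(arrival, burst):
    clock = max(clock, a)   # the CPU may sit idle until the process arrives
    start.append(clock)
    wait.append(clock - a)  # waiting time = start time - arrival time
    clock += b              # finish time = start time + burst time
assert wait == [0, 1, 2]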
confused with an artificial intelligence, hence my name')\n if re.search('info.donica.meaning', title, re.I):\n speak.send_speak('My name means doing nothing intelligent casual assistant')\n if re.search('info.donica.name', title, re.I):\n speak.send_speak('My name is Donica')\n if re.search('info.donica.purpose', title, re.I):\n speak.send_speak('My purpose is to be a virtual assistant, have you not been paying attention to my name?')\n # Put gender identification\n if re.search('smalltalk.agent.annoying', title, re.I):\n speak.send_speak('I always aim to please sir')\n if re.search('smalltalk.agent.how', title, re.I):\n speak.send_speak('Dont believe I have any new updates so things could be worse')\n if re.search('smalltalk.agent.greetings', title, re.I):\n speak.send_speak('Greetings sir')\n if re.search('smalltalk.agent.sean.quote', title, re.I):\n text = ['If you need toes, look at your feet.',\n 'What do you do? You should eat llamas of course.',\n 'What happens if they are actually your mother.',\n 'There is more papaya than nachos in the world.']\n get_text = random.choice(text)\n speak.send_speak(get_text)\n if re.search('smalltalk.agent.order.66', title, re.I):\n speak.send_speak('Executing Order 66')\n # Put gender identification\n if re.search('smalltalk.agent.question', title, re.I):\n speak.send_speak('I do not answer stupid questions sir')\n\n\ndef is_valid(title):\n return bool(re.search('smalltalk.agent.', title, re.I) or\n re.search('info.donica.', title, re.I))\n\n\n","sub_path":"donica/modules/ChatbotModule.py","file_name":"ChatbotModule.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"293299711","text":"from jingo import register\nfrom django.core.urlresolvers import reverse, resolve\nfrom django.http import Http404\n\n\n@register.function\ndef active(request, url):\n path = u'/%s' % ('/'.join(request.path.split('/')[2:]),)\n if url.endswith(reverse('projects_all')):\n try:\n match = resolve(path)\n if match.url_name in (\n 'projects_all', 'projects_show', 'projects_programs'):\n return ' selected'\n except Http404:\n return ''\n if path != '/' and url.startswith(path):\n return ' selected'\n","sub_path":"apps/innovate/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"617066408","text":"# \t新词发现的信息熵方法与实现\nimport numpy as np\nimport pandas as pd\nimport re\nfrom numpy import log, min\n\nf = open('机器学习.txt', 'r') # 读取文章\ns = f.read() # 读取为一个字符串\n\n# 定义要去掉的标点字或者字段\ndrop_dict = [u',', u'\\n', u'。', u'、', u':', u'(', u')', u'[', u']', u'.', u',', u' ', u'\\u3000', u'”', u'“', u'?', u'?',\n u'!', u'‘', u'’',u'(',u')',u'《',u'》', u'(',u')',u'…',u'-',u'0',u'1',u'2',u'3',u'4',u'5',u'6',u'7',u'8',u'9',\n u':',u'q',u'w',u'e',u'r',u't',u'y',u'u',u'i',u'o',u'u',u'p',u'a',u's',u'd',u'f',u'g',u'h',u'j',u'k',u'l',u'z',\n u'x',u'c',u'v',u'b',u'n',u'm',u'<',u'>',u'@',u'!',u'#',u'$',u'%',u'^',u'&',u'*',u'/',u'?',u'~',u'Q',u'W',u'E',\n u'R',u'T',u'Y',u'U',u'I',u'O',u'P',u'A',u'S',u'D',u'F',u'G','H',u'J',u'K',u'L',u'Z',u'X',u'C',u'V',u'B',u'N',u'M',\n u'【',u'】',u'|',u'à',u'╰',u'{',u'=',u';',u',',u'﹌﹌﹌']\nfor i in drop_dict: # 去掉标点字或者字段\n s = s.replace(i, '')\n\n# 为了方便调用,自定义了一个正则表达式的词典\nmyre = {2: '(..)', 3: '(...)', 4: '(....)', 5: '(.....)', 6: '(......)', 7: '(.......)'}\n\nmin_count = 10 # 录取词语最小出现次数\nmin_support = 30 # 录取词语最低支持度,1代表着随机组合\nmin_s = 3 # 
录取词语最低信息熵,越大说明越有可能独立成词\nmax_sep = 4 # 候选词语的最大字数\nt = [] # 保存结果用。\n\nt.append(pd.Series(list(s)).value_counts()) # 逐字统计\ntsum = t[0].sum() # 统计总字数\nrt = [] # 保存结果用\n\nfor m in range(2, max_sep + 1):\n print(u'正在生成%s字词...' % m)\n t.append([])\n for i in range(m): # 生成所有可能的m字词\n t[m - 1] = t[m - 1] + re.findall(myre[m], s[i:])\n\n t[m - 1] = pd.Series(t[m - 1]).value_counts() # 逐词统计\n t[m - 1] = t[m - 1][t[m - 1] > min_count] # 最小次数筛选\n tt = t[m - 1][:]\n for k in range(m - 1):\n qq = np.array(list(map(lambda ms: tsum * t[m - 1][ms] / t[m - 2 - k][ms[:m - 1 - k]] / t[k][ms[m - 1 - k:]],\n tt.index))) > min_support # 最小支持度筛选。\n tt = tt[qq]\n rt.append(tt.index)\n\n\ndef cal_S(sl): # 信息熵计算函数\n return -((sl / sl.sum()).apply(log) * sl / sl.sum()).sum()\n\n\nfor i in range(2, max_sep + 1):\n print(u'正在进行%s字词的最大熵筛选(%s)...' % (i, len(rt[i - 2])))\n pp = [] # 保存所有的左右邻结果\n for j in range(i):\n pp = pp + re.findall('(.)%s(.)' % myre[i], s[j:])\n pp = pd.DataFrame(pp).set_index(1).sort_index() # 先排序,这个很重要,可以加快检索速度\n index = np.sort(np.intersect1d(rt[i - 2], pp.index)) # 作交集\n # 下面两句分别是左邻和右邻信息熵筛选\n index = index[np.array(list(map(lambda s: cal_S(pd.Series(pp[0][s]).value_counts()), index))) > min_s]\n rt[i - 2] = index[np.array(list(map(lambda s: cal_S(pd.Series(pp[2][s]).value_counts()), index))) > min_s]\n\n# # 下面都是输出前处理\n# for i in range(len(rt)):\n# t[i + 1] = t[i + 1][rt[i]]\n# t[i + 1].sort(ascending=False)\n\n# 保存结果并输出\npd.DataFrame(pd.concat(t[1:])).to_csv('result.txt', header=False)\n\n# 性能分析模块:\n# python -m cProfile -o FoundNewWords_2.out FoundNewWords_2.\n# 随机排序:\n# python -m cProfile FoundNewWords.py\n# 按耗时排序:\n# python -c \"import pstats; p=pstats.Stats('FoundNewWords_2.out'); p.sort_stats('time').print_stats()\"\n\n\n","sub_path":"news_NewWordFound/FoundNewWords_2_BackUp.py","file_name":"FoundNewWords_2_BackUp.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"312210868","text":"import pandas as pd\r\ndf_reader = pd.read_json('G:/Forsk Technologies Internship/Clothing_Shoes_and_Jewelry.json', lines = True ,chunksize = 1000000)\r\n\r\ncounter = 1\r\nfor chunk in df_reader:\r\n \r\n print(counter)\r\n new_df = pd.DataFrame(chunk[['overall','reviewText','summary']])\r\n \r\n new_df1 = new_df[new_df['overall'] == 5].sample(4000)\r\n new_df2 = new_df[new_df['overall'] == 4].sample(4000)\r\n new_df3 = new_df[new_df['overall'] == 3].sample(8000)\r\n new_df4 = new_df[new_df['overall'] == 2].sample(4000)\r\n new_df5 = new_df[new_df['overall'] == 1].sample(4000)\r\n \r\n new_df6 = pd.concat([new_df1,new_df2,new_df3,new_df4,new_df5], axis = 0, ignore_index = True)\r\n new_df6.to_csv(str(counter)+\".csv\", index = False)\r\n counter = counter + 1\r\n\r\nimport glob\r\nb=glob.glob('*.csv')\r\nfinal_dataframe=pd.DataFrame()\r\nfor f in b:\r\n df=pd.read_csv(f)\r\n final_dataframe=final_dataframe.append(df,ignore_index=True)\r\n \r\nfinal_dataframe.to_csv('balanced_review.csv',index=False)\r\n","sub_path":"balanced_review.py","file_name":"balanced_review.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"557330228","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2015, DGSOL InfoTech and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe import _\nimport tfaddon\n\nclass 
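# cal_S in the new-word-discovery record above is the Shannon entropy of the neighbour
# counts, H = -sum(p * ln p): higher left/right entropy means more varied neighbours and
# a better standalone-word candidate. A self-contained check of the formula:
from math import log

def entropy(counts):
    total = sum(counts)
    return -sum(c / total * log(c / total) for c in counts)

assert abs(entropy([1, 1]) - log(2)) < 1e-12  # two equally likely neighbours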
Equipments(Document):\n\tdef onload(self):\n\t\tpass\n\n\tdef before_insert(self):\n\t\tpass\n\t\n\tdef validate(self):\n\t\tself.validate_mandatory_field()\n\n\t\t# Update Serial Number\n\t\tif (self.eq_sl_no == \"#\"):\n\t\t\tself.eq_sl_no = tfaddon.generate_unique_serial_no()\n\n\t\tself.update_read_only_fields()\n\n\tdef on_update(self):\n\t\tpass\n\n\tdef on_submit(self):\n\t\tpass\n\t\n\tdef on_cancel(self):\n\t\tpass\n\t\n\tdef on_trash (self):\n\t\tpass\n\n\tdef validate_mandatory_field(self):\n\t\tif not self.eq_manufacturer:\n\t\t\tfrappe.throw(_(\"Manufacturer is required. Select 'Unknown' if not available\"))\n\n\t\tif not self.eq_sl_no:\n\t\t\tfrappe.throw(_(\"Manufacturer's Serial No is required. Type # to generate\"))\n\n\t\tif (self.eq_yom):\n\t\t\timport re, datetime\n\t\t\tp = re.compile(\"[1]{1}[9]{1}[0-9]{2}\")\n\t\t\tq = re.compile(\"[2]{1}[0]{1}[0-9]{2}\")\n\t\t\tif not (p.match(self.eq_yom) or q.match(self.eq_yom)):\n\t\t\t\tfrappe.throw(_(\"Valid Manufacturing Year must be between 1900 to Current Year\"))\n\t\t\tcur_year = frappe.utils.data.cint(tfaddon.cur_year())\n\t\t\tif (frappe.utils.data.cint(self.eq_yom) > cur_year):\n\t\t\t\tfrappe.throw(_(\"Manufacturing Year cannot be future year\"))\n\n\t\t# Oil Type is Mandatory \n\t\tif not self.eq_oil_type:\n\t\t\tfrappe.throw(_(\"Oil Type is required\"))\n\n\n\t\t# validate equipment parameters on equipment type\n\t\tif (self.eq_group != \"CONTAINER\"):\n\t\t\t# following Items are mandatory for all equipments except Containers\n\t\t\tif not (self.eq_oil_qty and self.eq_oil_qty != 0):\n\t\t\t\tfrappe.throw(_(\"Oil Quantity is required\"))\n\n\t\t\tif (not self.voltage_class or self.voltage_class == \"NA\"):\n\t\t\t\tfrappe.throw(_(\"Specify appropriate Voltage Class\"))\n\n\t\t\tif (self.eq_group == \"TRANSFORMER\" or self.eq_group == \"INSTRUMENT\"):\n\t\t\t\tif (not self.eq_capacity or self.eq_capacity == 0):\n\t\t\t\t\tfrappe.throw(_(\"Rating is Mandatory\"), frappe.MandatoryError)\n\t\t\t\tif (not self.eq_pv or self.eq_pv == 0):\n\t\t\t\t\tfrappe.throw(_(\"Primary Voltage is Mandatory\"), frappe.MandatoryError)\n\t\t\t\tif (not self.eq_sv or self.eq_sv == 0):\n\t\t\t\t\tfrappe.throw(_(\"Secondary Voltage is Mandatory\"), frappe.MandatoryError)\n\t\t\telif (self.eq_group == \"CURRENT\"):\n\t\t\t\tif (not self.eq_capacity or self.eq_capacity == 0):\n\t\t\t\t\tfrappe.throw(_(\"Rating is Mandatory\"), frappe.MandatoryError)\n\t\t\t\tif (not self.eq_pc):\n\t\t\t\t\tfrappe.throw(_(\"Primary current is Mandatory\"), frappe.MandatoryError)\n\t\t\t\tif (not self.eq_sc):\n\t\t\t\t\tfrappe.throw(_(\"Secondary current is Mandatory\"), frappe.MandatoryError)\n\t\t\telif (self.eq_group == \"REACTOR\"):\n\t\t\t\tif (not self.eq_capacity or self.eq_capacity == 0):\n\t\t\t\t\tfrappe.throw(_(\"Rating is Mandatory\"), frappe.MandatoryError)\n\t\t\telif (self.eq_group == \"OCB\"):\n\t\t\t\tif (not self.eq_pc or self.eq_pc == 0):\n\t\t\t\t\tfrappe.throw(_(\"Primary current is Mandatory\"), frappe.MandatoryError)\n\t\t\telse:\n\t\t\t\tif (not self.eq_pv or self.eq_pv == 0):\n\t\t\t\t\tfrappe.throw(_(\"Primary Voltage is Mandatory\"), frappe.MandatoryError)\n\n\t\t\tif not (self.eq_phases):\n\t\t\t\tfrappe.throw(_(\"Please select appropriate No of Phases\"), frappe.MandatoryError)\n\n\t\t\tif (self.eq_cooling == \"ONAN\"):\n\t\t\t\tself.eq_rating1 = self.eq_capacity\n\t\t\telse:\n\t\t\t\tself.eq_rating1 = 0\n\t\telse:\n\t\t\tself.eq_phases = \"NA\"\n\t\t\tself.voltage_class = \"NA\"\n\t\t\tself.eq_oil_qty = 0\n\n\tdef 
update_read_only_fields(self):\n\t\tself.title = self.eq_manufacturer + '-' + self.eq_sl_no\n\t\tself.capacity = self.get_capacity()\n\t\tself.voltage = self.get_voltage()\n\t\tself.current = self.get_current()\n\n\tdef get_capacity(self):\n\t\tif (self.eq_group == \"INSTRUMENT\" or self.eq_group == \"CURRENT\"):\n\t\t\tunit = \" VA\"\n\t\telif (self.eq_group == \"REACTOR\"):\n\t\t\tunit = \" kVAr\"\n\t\telse:\n\t\t\tunit = \" kVA\"\n\n\t\tif (self.eq_group != \"CONTAINER\" and self.eq_group != \"OCB\"):\n\t\t\tif (self.eq_capacity):\n\t\t\t\tcapacity = str(self.eq_capacity) + unit\n\t\t\telse:\n\t\t\t\tcapacity = \"NS\"\n\t\telif (self.eq_group == \"OCB\"):\n\t\t\tcapacity = str(self.eq_pc) + \" Amps\"\n\n\t\telse:\n\t\t\tcapacity = \"NA\"\n\n\t\treturn capacity\n\t\t\n\tdef get_voltage(self):\n\t\tif(self.eq_group in [\"CURRENT\",\"REACTOR\",\"OCB\" ,\"CONTAINER\"]):\n\t\t\tvoltage = \"NA\"\n\t\telse:\n\t\t\tif (self.eq_pv and self.eq_sv and self.eq_tv):\n\t\t\t\t#voltage = str(self.eq_pv) + \"/\" + str(self.eq_sv) + (\"/\" + str(self.eq_tv)) + \" Volts\"\n\t\t\t\tvoltage = \"/\".join([str(self.eq_pv),str(self.eq_sv),str(self.eq_tv)]) + \" Volts\"\n\t\t\telif (self.eq_pv and self.eq_sv):\n\t\t\t\t#voltage = str(self.eq_pv) + \"/\" + str(self.eq_sv) + (\"/\" + str(self.eq_tv)) + \" Volts\"\n\t\t\t\tvoltage = \"/\".join([str(self.eq_pv),str(self.eq_sv)]) + \" Volts\"\n\t\t\telif (self.eq_pv):\n\t\t\t\tvoltage = str(self.eq_pv) + \" Volts\"\n\t\t\telse:\n\t\t\t\tvoltage = \"NS\"\n\n\t\treturn voltage\n\n\tdef get_current(self):\n\t\tif(self.eq_group in [\"CURRENT\",\"OCB\"]):\n\t\t\tif (self.eq_pc and self.eq_sc):\n\t\t\t\tcurrent = str(self.eq_pc) + \"/\" + str(self.eq_sc) + \" Amps\"\n\t\t\telif (self.eq_pc):\n\t\t\t\tcurrent = str(self.eq_pc) + \" Amps\"\n\t\t\telse:\n\t\t\t\tcurrent = \"NS\"\n\t\telse:\n\t\t\tcurrent = \"NA\"\n\t\t\t\n\t\treturn current\n\n\tdef get_phases(self):\n\t\tif (self.eq_group != \"CONTAINER\"):\n\t\t\tif (self.eq_phases):\n\t\t\t\tphases = self.eq_phases\n\t\t\telse:\n\t\t\t\tphases = \"NS\"\n\t\telse:\n\t\t\tphases = \"NA\"\n\n\t\treturn phases\n\n","sub_path":"tfaddon/back_office/doctype/equipments/equipments.py","file_name":"equipments.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"256440704","text":"import os\nimport math\nimport argparse\nimport random\nimport logging\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom data.data_sampler import DistIterSampler\n\nimport options.options as option\nfrom utils import util\nfrom data import create_dataloader, create_dataset\nfrom models import create_model\n\n\ndef init_dist(backend='nccl', **kwargs):\n ''' initialization for distributed training'''\n # if mp.get_start_method(allow_none=True) is None:\n if mp.get_start_method(allow_none=True) != 'spawn': #Return the name of start method used for starting processes\n mp.set_start_method('spawn', force=True) ##'spawn' is the default on Windows\n rank = int(os.environ['RANK']) #system env process ranks\n num_gpus = torch.cuda.device_count() #Returns the number of GPUs available\n torch.cuda.set_device(rank % num_gpus)\n dist.init_process_group(backend=backend, **kwargs) #Initializes the default distributed process group\n\n\ndef main():\n ###### SFTMD train ######\n #### setup options\n parser = argparse.ArgumentParser()\n parser.add_argument('-opt_F', type=str, help='Path to option YMAL file of 
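# get_voltage in the Equipments record above repeats one join() per combination of
# populated fields; filtering falsy parts first collapses those branches. A sketch only,
# with an illustrative helper that is not part of the Frappe doctype:
def join_voltages(*parts):
    vals = [str(p) for p in parts if p]
    return "/".join(vals) + " Volts" if vals else "NS"

assert join_voltages(11000, 433, None) == "11000/433 Volts"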
SFTMD_Net.')\n parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n opt_F = option.parse(args.opt_F, is_train=True)\n\n # convert to NoneDict, which returns None for missing keys\n opt_F = option.dict_to_nonedict(opt_F)\n\n #### random seed\n seed = opt_F['train']['manual_seed']\n if seed is None:\n seed = random.randint(1, 10000)\n util.set_random_seed(seed)\n\n # create PCA matrix of enough kernel and save it, to ensure all kernel have same corresponding kernel maps\n batch_ker = util.random_batch_kernel(batch=30000, l=opt_F['kernel_size'], sig_min=opt_F['sig_min'], sig_max=opt_F['sig_max'], rate_iso=1.0, scaling=3, tensor=False)\n print('batch kernel shape: {}'.format(batch_ker.shape))\n b = np.size(batch_ker, 0)\n batch_ker = batch_ker.reshape((b, -1))\n pca_matrix = util.PCA(batch_ker, k=opt_F['code_length']).float()\n print('PCA matrix shape: {}'.format(pca_matrix.shape))\n torch.save(pca_matrix, './pca_matrix.pth')\n print('Save PCA matrix at: ./pca_matrix.pth')\n\n #### distributed training settings\n if args.launcher == 'none': # disabled distributed training\n opt_F['dist'] = False\n rank = -1\n print('Disabled distributed training.')\n else:\n opt_F['dist'] = True\n init_dist()\n world_size = torch.distributed.get_world_size()\n rank = torch.distributed.get_rank()\n\n torch.backends.cudnn.benchmark = True\n # torch.backends.cudnn.deterministic = True\n\n #### loading resume state if exists\n if opt_F['path'].get('resume_state', None):\n # distributed resuming: all load into default GPU\n device_id = torch.cuda.current_device()\n resume_state = torch.load(opt_F['path']['resume_state'],\n map_location=lambda storage, loc: storage.cuda(device_id))\n option.check_resume(opt_F, resume_state['iter']) # check resume options\n else:\n resume_state = None\n\n #### mkdir and loggers\n if rank <= 0:\n if resume_state is None:\n util.mkdir_and_rename(\n opt_F['path']['experiments_root']) # rename experiment folder if exists\n util.mkdirs((path for key, path in opt_F['path'].items() if not key == 'experiments_root'\n and 'pretrain_model' not in key and 'resume' not in key))\n\n # config loggers. Before it, the log will not work\n util.setup_logger('base', opt_F['path']['log'], 'train_' + opt_F['name'], level=logging.INFO,\n screen=True, tofile=True)\n util.setup_logger('val', opt_F['path']['log'], 'val_' + opt_F['name'], level=logging.INFO,\n screen=True, tofile=True)\n logger = logging.getLogger('base')\n logger.info(option.dict2str(opt_F))\n # tensorboard logger\n if opt_F['use_tb_logger'] and 'debug' not in opt_F['name']:\n version = float(torch.__version__[0:3])\n if version >= 1.1: # PyTorch 1.1\n from torch.utils.tensorboard import SummaryWriter\n else:\n logger.info(\n 'You are using PyTorch {}. 
Tensorboard will use [tensorboardX]'.format(version))\n from tensorboardX import SummaryWriter\n tb_logger = SummaryWriter(log_dir='../tb_logger/' + opt_F['name'])\n else:\n util.setup_logger('base', opt_F['path']['log'], 'train', level=logging.INFO, screen=True)\n logger = logging.getLogger('base')\n\n\n #### create train and val dataloader\n dataset_ratio = 200 # enlarge the size of each epoch\n for phase, dataset_opt in opt_F['datasets'].items():\n if phase == 'train':\n train_set = create_dataset(dataset_opt)\n train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))\n total_iters = int(opt_F['train']['niter'])\n total_epochs = int(math.ceil(total_iters / train_size))\n if opt_F['dist']:\n train_sampler = DistIterSampler(train_set, world_size, rank, dataset_ratio)\n total_epochs = int(math.ceil(total_iters / (train_size * dataset_ratio)))\n else:\n train_sampler = None\n train_loader = create_dataloader(train_set, dataset_opt, opt_F, train_sampler)\n if rank <= 0:\n logger.info('Number of train images: {:,d}, iters: {:,d}'.format(\n len(train_set), train_size))\n logger.info('Total epochs needed: {:d} for iters {:,d}'.format(\n total_epochs, total_iters))\n elif phase == 'val':\n val_set = create_dataset(dataset_opt)\n val_loader = create_dataloader(val_set, dataset_opt, opt_F, None)\n if rank <= 0:\n logger.info('Number of val images in [{:s}]: {:d}'.format(\n dataset_opt['name'], len(val_set)))\n else:\n raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))\n assert train_loader is not None\n assert val_loader is not None\n\n #### create model\n model_F = create_model(opt_F)\n\n #### resume training\n if resume_state:\n logger.info('Resuming training from epoch: {}, iter: {}.'.format(\n resume_state['epoch'], resume_state['iter']))\n\n start_epoch = resume_state['epoch']\n current_step = resume_state['iter']\n model_F.resume_training(resume_state) # handle optimizers and schedulers\n else:\n current_step = 0\n start_epoch = 0\n\n #### training\n logger.info('Start training from epoch: {:d}, iter: {:d}'.format(start_epoch, current_step))\n for epoch in range(start_epoch, total_epochs + 1):\n if opt_F['dist']:\n train_sampler.set_epoch(epoch)\n for _, train_data in enumerate(train_loader):\n current_step += 1\n if current_step > total_iters:\n break\n #### preprocessing for LR_img and kernel map\n prepro = util.SRMDPreprocessing(opt_F['scale'], pca_matrix, random=True, para_input=opt_F['code_length'],\n kernel=opt_F['kernel_size'], noise=False, cuda=True, sig=opt_F['sig'],\n sig_min=opt_F['sig_min'], sig_max=opt_F['sig_max'], rate_iso=1.0, scaling=3,\n rate_cln=0.2, noise_high=0.0)\n LR_img, ker_map = prepro(train_data['GT'])\n\n #### update learning rate, schedulers\n model_F.update_learning_rate(current_step, warmup_iter=opt_F['train']['warmup_iter'])\n\n #### training\n model_F.feed_data(train_data, LR_img, ker_map)\n model_F.optimize_parameters(current_step)\n\n #### log\n if current_step % opt_F['logger']['print_freq'] == 0:\n logs = model_F.get_current_log()\n message = ' '.format(\n epoch, current_step, model_F.get_current_learning_rate())\n for k, v in logs.items():\n message += '{:s}: {:.4e} '.format(k, v)\n # tensorboard logger\n if opt_F['use_tb_logger'] and 'debug' not in opt_F['name']:\n if rank <= 0:\n tb_logger.add_scalar(k, v, current_step)\n if rank <= 0:\n logger.info(message)\n\n # validation\n if current_step % opt_F['train']['val_freq'] == 0 and rank <= 0:\n avg_psnr = 0.0\n idx = 0\n for _, val_data in enumerate(val_loader):\n 
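                    # In this validation loop, PSNR is computed only after cropping
                    # `scale` pixels from each border, a common super-resolution
                    # convention: border pixels lack full context and would bias the score.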
idx += 1\n #### preprocessing for LR_img and kernel map\n prepro = util.SRMDPreprocessing(opt_F['scale'], pca_matrix, random=True, para_input=opt_F['code_length'],\n kernel=opt_F['kernel_size'], noise=False, cuda=True, sig=opt_F['sig'],\n sig_min=opt_F['sig_min'], sig_max=opt_F['sig_max'], rate_iso=1.0, scaling=3,\n rate_cln=0.2, noise_high=0.0)\n LR_img, ker_map = prepro(val_data['GT'])\n\n model_F.feed_data(val_data, LR_img, ker_map)\n model_F.test()\n\n visuals = model_F.get_current_visuals()\n sr_img = util.tensor2img(visuals['SR']) # uint8\n gt_img = util.tensor2img(visuals['GT']) # uint8\n\n # Save SR images for reference\n img_name = os.path.splitext(os.path.basename(val_data['LQ_path'][0]))[0]\n #img_dir = os.path.join(opt_F['path']['val_images'], img_name)\n img_dir = os.path.join(opt_F['path']['val_images'], str(current_step))\n util.mkdir(img_dir)\n\n save_img_path = os.path.join(img_dir,'{:s}_{:d}.png'.format(img_name, current_step))\n util.save_img(sr_img, save_img_path)\n\n # calculate PSNR\n crop_size = opt_F['scale']\n gt_img = gt_img / 255.\n sr_img = sr_img / 255.\n cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n avg_psnr += util.calculate_psnr(cropped_sr_img * 255, cropped_gt_img * 255)\n\n avg_psnr = avg_psnr / idx\n\n # log\n logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr))\n logger_val = logging.getLogger('val') # validation logger\n logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.6f}'.format(epoch, current_step, avg_psnr))\n # tensorboard logger\n if opt_F['use_tb_logger'] and 'debug' not in opt_F['name']:\n tb_logger.add_scalar('psnr', avg_psnr, current_step)\n\n\n #### save models and training states\n if current_step % opt_F['logger']['save_checkpoint_freq'] == 0:\n if rank <= 0:\n logger.info('Saving models and training states.')\n model_F.save(current_step)\n model_F.save_training_state(epoch, current_step)\n\n if rank <= 0:\n logger.info('Saving the final model.')\n model_F.save('latest')\n logger.info('End of SFTMD training.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"codes/train_SFTMD.py","file_name":"train_SFTMD.py","file_ext":"py","file_size_in_byte":11914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"165623352","text":"import struct\nimport ctypes\nimport sqlite3\nfrom log_util import LogUtil\n\ndef calculate_check_sum(send_buff, length):\n \"\"\"Compute the checksum over send_buff[0:length] (end index exclusive).\n \"\"\"\n check_sum = 0\n for x in send_buff[0:length]:\n check_sum+=x\n check_sum=check_sum%256\n return check_sum\n\ndef packHeartbeatMessage():\n \"\"\"Build a heartbeat message ready for sending\"\"\"\n message_header=(3, 0)\n send_buff = struct.pack('!II', *message_header)\n check_sum = calculate_check_sum(send_buff, 8)\n send_buff = send_buff + struct.pack('!I', check_sum)\n return send_buff\n\ndef packLogoutMessage():\n \"\"\"Build a Logout message ready for sending\"\"\"\n message_header_struct = struct.Struct('!II')\n logout_struct = struct.Struct('!I200s')\n message_footer_struct = struct.Struct('!I')\n \n send_buff = ctypes.create_string_buffer(message_header_struct.size+logout_struct.size+message_footer_struct.size)\n \n bodyLength = logout_struct.size\n message_header =(2, bodyLength)\n message_header_struct.pack_into(send_buff, 0, *message_header)\n \n session_status=0 #active\n text= str.encode(\"\".ljust(200))\n \n logout_body = (session_status, text)\n logout_struct.pack_into(send_buff, message_header_struct.size, *logout_body)\n check_sum = 
calculate_check_sum(send_buff, message_header_struct.size+logout_struct.size)\n message_footer_struct.pack_into(send_buff, message_header_struct.size+logout_struct.size, check_sum)\n return send_buff\n\ndef get_message_type(buffer):\n header=struct.unpack_from('!II', buffer)\n return header[0]\n\ndef get_message_header(buffer):\n header=struct.unpack_from('!II', buffer)\n return header\n\nclass Message:\n header_len=8\n footer_len=4\n def __init__(self):\n self.message_type=0\n self.body_len=0\n self.message_str=''\n self.has_tag_name=True\n self.separator=','\n self.client_order_id=''\n self.order_status=''\n self.order_reject_reason='' \n \n def toString(self):\n return \"[MsgType=\"+str(self.message_type)+\"]\"+self.message_str\n \nclass MessageProcessor:\n def __init__(self, message_type, field_list):\n #field_list:\n #0:field_name;\n #1:format_str:\n #2:type,eg N,C;\n #3:type_len;\n #4:ref_field\n #5:struct_tag\n self.message_type=message_type\n self.field_list=field_list\n self.field_cnt=len(field_list)\n self.format_str='!'\n self.fixed_len=True\n self.nested=False\n for field in field_list:\n self.format_str=self.format_str+field[1]\n #TODO:should consider var_len&nested\n if field[5]=='LOOP_CNT':\n self.nested=True\n elif field[5]=='NEXT_VAR_LEN':\n self.fixed_len=False\n else:\n pass\n self.header_struct = struct.Struct('!II')\n self.body_struct = struct.Struct(self.format_str)\n self.footer_struct = struct.Struct('!I')\n self.buff_size=self.header_struct.size+self.body_struct.size+self.footer_struct.size\n \n def pack(self, values_str, separator=',', has_tag_name=True):\n #pack header\n header=(self.message_type, self.body_struct.size)\n buff=self.header_struct.pack(*header)\n \n #pack body\n value_list=values_str.split(separator)\n if len(value_list)!=self.field_cnt:\n #TODO:return error\n LogUtil.error(\"Error Format:\"+str(self.message_type)+\":\"+str(len(value_list))+\":\"+str(self.field_cnt))\n #return\n target_value_list=[] \n for field_index in range(0, self.field_cnt):\n if has_tag_name:\n field_value=value_list[field_index].split('=')[1]\n else:\n field_value=value_list[field_index]\n if self.field_list[field_index][2]=='C':\n target_value=str.encode(field_value.ljust(self.field_list[field_index][3]))\n elif self.field_list[field_index][2]=='N':\n target_value=int(field_value)\n else: \n target_value=str.encode(field_value.ljust(self.field_list[field_index][3]))\n target_value_list.append(target_value) \n \n buff=buff+self.body_struct.pack(*target_value_list)\n check_sum = calculate_check_sum(buff, self.header_struct.size+self.body_struct.size)\n buff=buff+self.footer_struct.pack(check_sum)\n \n return buff\n def unpack(self, buff, separator=',', write_tag_name=True):\n if self.nested:\n return self.unpack_nested(buff, separator,write_tag_name)\n elif self.fixed_len:\n return self.unpack_fixedlen(buff, separator,write_tag_name)\n else: \n #return self.unpack_varlen(buff, separator,write_tag_name)\n return self.unpack_nested(buff, separator,write_tag_name)\n \n def unpack_fixedlen(self, buff, separator=',', write_tag_name=True):\n message=Message()\n message.separator=separator\n message.has_tag_name=write_tag_name\n message.message_type=self.message_type\n \n (message_type, body_len)=self.header_struct.unpack_from(buff)\n if message_type!=self.message_type:\n #TODO:error format check\n LogUtil.error(\"Error message_type,expected=\"+str(self.message_type)+\",act_type=\"+str(message_type))\n return\n #TODO:check buff size\n #TODO:check checksum \n 
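# one possible implementation of the checksum TODO above (sketch, not in the original file):\n # expected=calculate_check_sum(buff, self.header_struct.size+body_len)\n # (actual,)=self.footer_struct.unpack_from(buff, self.header_struct.size+body_len)\n # if expected!=actual: LogUtil.error(\"checksum mismatch\")\n 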
body_buff=buff[self.header_struct.size:]\n bytes_processed=0\n body_tuple=self.body_struct.unpack_from(body_buff)\n if len(body_tuple)!=self.field_cnt:\n return\n \n rtn_str=''\n for field_index in range(len(body_tuple)):\n if field_index>0:\n rtn_str=rtn_str+separator\n if write_tag_name:\n rtn_str=rtn_str+self.field_list[field_index][0]+'='\n \n if self.field_list[field_index][2]=='C':\n str_value=bytes.decode(body_tuple[field_index])\n elif self.field_list[field_index][2]=='N':\n str_value=str(body_tuple[field_index])\n else: \n str_value=bytes.decode(body_tuple[field_index])\n bytes_processed=bytes_processed+self.field_list[field_index][3]\n \n rtn_str=rtn_str+str_value\n #TODO add a column to message_body_def to indicate whether the column is client Order ID\n if self.field_list[field_index][0]=='ClOrdID':\n message.client_order_id=bytes.decode(body_tuple[field_index])\n elif self.field_list[field_index][0]=='OrdStatus':\n message.order_status=bytes.decode(body_tuple[field_index])\n elif self.field_list[field_index][0]=='OrdRejReason':\n message.order_reject_reason=(body_tuple[field_index])\n message.message_str=rtn_str\n if bytes_processed!=body_len:\n LogUtil.error(\"bytes_process!=body_len,mesType=\"+str(message_type)+\",body_len=\"+str(body_len)+\",bytes_processed=\"+str(bytes_processed))\n\n return message\n def unpack_varlen(self, buff, separator=',', write_tag_name=True):\n message=Message()\n message.separator=separator\n message.has_tag_name=write_tag_name\n message.message_type=self.message_type\n \n (message_type, body_len)=self.header_struct.unpack_from(buff)\n if message_type!=self.message_type:\n #TODO:error format check\n LogUtil.error(\"Error message_type,expected=\"+str(self.message_type)+\",act_type=\"+str(message_type))\n return\n #TODO:check buff size\n #TODO:check checksum \n bytes_processed=0\n body_left_buff=buff[self.header_struct.size:]\n #body_tuple=self.body_struct.unpack_from(body_buff)\n str_value_list=[]\n rtn_str=''\n #field_list:\n #0:field_name;\n #1:format_str:\n #2:type,eg N,C;\n #3:type_len;\n #4:ref_var_len_field\n for field_index in range(len(self.field_list)):\n if field_index>0:\n rtn_str=rtn_str+separator\n if write_tag_name:\n rtn_str=rtn_str+self.field_list[field_index][0]+'='\n \n if self.field_list[field_index][4]>0:\n #self.field_list[field_index][4] is the index of field which holding the length\n format_str=\"!\"+str_value_list[self.field_list[field_index][4]]+\"s\"\n bytes_len=int(str_value_list[self.field_list[field_index][4]])\n LogUtil.debug(\"field:\"+self.field_list[field_index][0]+\" format:\"+format_str)\n else:\n format_str=\"!\"+self.field_list[field_index][1]\n bytes_len=self.field_list[field_index][3]\n LogUtil.debug(\"field:\"+self.field_list[field_index][0]+\" format:\"+format_str)\n field_value=struct.unpack_from(format_str, body_left_buff)\n if self.field_list[field_index][2]=='C':\n field_str_value=bytes.decode(field_value[0])\n elif self.field_list[field_index][2]=='N':\n field_str_value=str(field_value[0])\n else: \n field_str_value=bytes.decode(field_value[0])\n \n str_value_list.append(field_str_value)\n rtn_str=rtn_str+field_str_value\n #TODO add a column to message_body_def to indicate whether the column is client Order ID\n if self.field_list[field_index][0]=='ClOrdID':\n message.client_order_id=field_str_value\n elif self.field_list[field_index][0]=='OrdStatus':\n message.order_status=field_str_value\n elif self.field_list[field_index][0]=='OrdRejReason':\n message.order_reject_reason=field_value\n 
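# advance past the decoded field: count its bytes and slice them off the remaining buffer\n 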
bytes_processed=bytes_processed+bytes_len \n body_left_buff=body_left_buff[bytes_len:]\n \n message.message_str=rtn_str\n if bytes_processed!=body_len:\n LogUtil.error(\"bytes_process!=body_len,mesType=\"+str(message_type)+\",body_len=\"+str(body_len)+\",bytes_processed=\"+str(bytes_processed))\n\n return message\n\n def unpack_nested(self, buff, separator=',', write_tag_name=True):\n message=Message()\n message.separator=separator\n message.has_tag_name=write_tag_name\n message.message_type=self.message_type\n \n (message_type, body_len)=self.header_struct.unpack_from(buff)\n if message_type!=self.message_type:\n #TODO:error format check\n LogUtil.error(\"Error message_type,expected=\"+str(self.message_type)+\",act_type=\"+str(message_type))\n return\n #TODO:check buff size\n #TODO:check checksum \n \n body_left_buff=buff[self.header_struct.size:]\n bytes_processed=0\n #body_tuple=self.body_struct.unpack_from(body_buff)\n str_value_list=[]\n rtn_str=''\n #field_list:\n #0:field_name;\n #1:format_str:\n #2:type,eg N,C;\n #3:type_len;\n #4:ref_field\n #5:struct_tag\n field_index=0\n #suppose we encounter a body begin tag\n (loop_cnt_field, loop_cnt, exec_cnt)=(0, 1, 1)\n (var_len_indictor_field, var_len)=(0, 0)\n loop_list=[]\n while field_index<self.field_cnt:\n if field_index>0:\n rtn_str=rtn_str+separator\n if write_tag_name:\n rtn_str=rtn_str+self.field_list[field_index][0]+'='\n \n if self.field_list[field_index][5]=='VAR_LEN':\n #self.field_list[field_index][4]-1 is the index of the field which holds the length\n if self.field_list[field_index][4]!=var_len_indictor_field:\n LogUtil.error(\"Error getting VAR_LEN:field_index=\"+str(field_index)+\" indict field=\"+str(var_len_indictor_field))\n format_str=\"!\"+str(var_len)+\"s\"\n bytes_len=var_len\n LogUtil.debug(\"field:\"+self.field_list[field_index][0]+\" format:\"+format_str)\n else:\n format_str=\"!\"+self.field_list[field_index][1]\n bytes_len=self.field_list[field_index][3]\n LogUtil.debug(\"field:\"+self.field_list[field_index][0]+\" format:\"+format_str)\n field_value=struct.unpack_from(format_str, body_left_buff)\n \n if self.field_list[field_index][5]=='NEXT_VAR_LEN':\n var_len=field_value[0]\n var_len_indictor_field=field_index\n \n if self.field_list[field_index][2]=='C':\n field_str_value=bytes.decode(field_value[0])\n elif self.field_list[field_index][2]=='N':\n field_str_value=str(field_value[0])\n else:\n field_str_value=bytes.decode(field_value[0])\n LogUtil.error(\"unknown type category:\"+self.field_list[field_index][2])\n \n str_value_list.append(field_str_value)\n rtn_str=rtn_str+field_str_value\n #TODO add a column to message_body_def to indicate whether the column is client Order ID\n if self.field_list[field_index][0]=='ClOrdID':\n message.client_order_id=field_str_value\n elif self.field_list[field_index][0]=='OrdStatus':\n message.order_status=field_str_value\n elif self.field_list[field_index][0]=='OrdRejReason':\n message.order_reject_reason=field_value[0]\n bytes_processed=bytes_processed+bytes_len \n body_left_buff=body_left_buff[bytes_len:]\n \n if self.field_list[field_index][5]=='LOOP_CNT':\n loop_list.append((field_index, loop_cnt, exec_cnt))\n LogUtil.debug(\"push:field_index=\"+str(field_index)+ \",loop=\"+str(loop_cnt)+ \",exec=\"+str(exec_cnt))\n loop_cnt=field_value[0]\n exec_cnt=0\n field_index=field_index+1 \n elif self.field_list[field_index][5] in('LOOP_BEGIN','LOOP_END'):\n if self.field_list[field_index][5]=='LOOP_END':\n exec_cnt=exec_cnt+1\n if exec_cnt>loop_cnt:\n LogUtil.critical(\"LOOP_END error: exec_cnt=\"+str(exec_cnt)+\",loop_cnt=\"+str(loop_cnt))\n field_index=field_index+1\n else:\n #TODO:raiseExceptions\n 
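# until the TODO is addressed, an unknown struct tag is logged and parsing stops\n 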
LogUtil.error(\"unsupported struct tag:\"+self.field_list[field_index][5])\n break\n message.message_str=rtn_str\n if bytes_processed!=body_len:\n LogUtil.error(\"bytes_process!=body_len,mesType=\"+str(message_type)+\",body_len=\"+str(body_len)+\",bytes_processed=\"+str(bytes_processed))\n return message\n\n\nclass MessageProcessorFactory:\n def __init__(self):\n self.message_processors={}\n def load_from_db(self, db_file, message_type_list):\n conn=sqlite3.connect(db_file)\n curs=conn.cursor()\n if len(message_type_list)==0:\n curs.execute('select distinct message_type from message_body_def order by message_type')\n for row in curs.fetchall():\n message_type_list.append(row[0])\n query='''select t.message_type,t.field_name,t.field_desc,t.format_string,t.type_category,t.type_len,t.ref_field,t.field_order,t.struct_tag \n from vw_message_body_def t\n where t.message_type=?\n order by t.field_order\n '''\n for message_type in message_type_list:\n field_list=[]\n field_cnt=0\n curs.execute(query, [message_type])\n for row in curs.fetchall():\n field_cnt=field_cnt+1\n LogUtil.info(row)\n if row[7]!=field_cnt-1:\n #TODO:raiseExceptions\n LogUtil.error(\"field list disorder:mesType=\"+str(message_type)+\" field:\"+str(field_cnt))\n field_list.append((row[1], row[3], row[4], row[5], row[6], row[8]))\n message_processor=MessageProcessor(message_type, field_list)\n self.message_processors[message_type]=message_processor\n \n def build_message_processor(self, message_type):\n return self.message_processors[message_type]\n \n \n","sub_path":"msg.py","file_name":"msg.py","file_ext":"py","file_size_in_byte":17231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"163030816","text":"from contextlib import suppress\nfrom datetime import timedelta\nimport os.path\nimport signal\n\nimport sh\n\nfrom recipes.helpers import set_sh_defaults\n\n\n@set_sh_defaults\ndef execute(working_dir: str, frequency: str, duration: timedelta, sh=sh):\n raw_path = os.path.join(working_dir, \"signal.raw\")\n signal_path = os.path.join(working_dir, \"signal.wav\")\n product_path = os.path.join(working_dir, \"product.png\")\n log_path = os.path.join(working_dir, \"session.log\")\n\n sample_rate = 48000\n\n # Let's log the operations done by the tools to a log file. We need to flush it\n # frequently, because this file stream is also used capture tools output. Without\n # flush, the logging order gets completely messed up.\n logfile = open(log_path, \"w\")\n logfile.write(\"---rtl_fm log-------\\n\")\n logfile.flush()\n\n # Run rtl_fm/rx_fm - this records the actual samples from the RTL device\n with suppress(sh.TimeoutException):\n sh.rtl_fm(\n # Specify frequency (in Hz, e.g. 137MHz)\n \"-f\", frequency,\n # Specify sampling rate (e.g. 48000 Hz)\n \"-s\", sample_rate,\n # Maximal possible value. Probably is wrong for other SDR then rtl-sdr\n \"-g\", 49.6,\n # Copy-paste from suspects www\n \"-p\", 1,\n # Higher quality downsampling - possible value 0 or 9. 9 is experimental.\n \"-F\", 9,\n # Enable bias-T\n \"-T\",\n # How arctan is computed. 
We don't test other options.\n \"-A\", \"fast\",\n # dc blocking filter (?)\n \"-E\", \"DC\",\n # Output to pipe, optional in this command\n raw_path,\n _timeout=duration.total_seconds(),\n _timeout_signal=signal.SIGTERM,\n\n # rtl_fm and rx_fm both print messages on stderr\n _err=logfile\n )\n logfile.flush()\n\n logfile.write(\"---sox log-------\\n\")\n logfile.flush()\n\n # Run sox - this convert raw samples into audible WAV\n sh.sox( # Type of input\n \"-t\", \"raw\",\n # Sample size in bits\n \"-b16\",\n # Signed integer encoding\n \"-es\",\n \"-r\", sample_rate,\n # Number of channels of audio data - 1 - mono\n \"-c1\",\n # Verbosity level (0 - silence, 1 - failure messages, 2 - warnings, 3 - processing phases, 4 - debug)\n \"-V3\",\n # Read from the raw file (instead of stdin via pipe)\n raw_path,\n # Output path\n signal_path,\n # Resampling rate\n \"rate\", \"11025\",\n _out=logfile\n )\n logfile.flush()\n\n logfile.write(\"---noaa-apt log-------\\n\")\n logfile.flush()\n\n # Run noaa_apt - this decodes APT from the audio file into PNG image.\n sh.noaa_apt(\n \"-o\", product_path,\n \"--false-color\", \"--contrast\", \"telemetry\",\n signal_path,\n _out=logfile\n )\n logfile.flush()\n logfile.close()\n\n return [\n (\"SIGNAL\", signal_path),\n (\"PRODUCT\", product_path),\n (\"LOG\", log_path),\n (\"RAW\", raw_path)\n ]\n","sub_path":"station/recipes/noaa_apt.py","file_name":"noaa_apt.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"393444372","text":"\"\"\"Tools for more complex analysis of SELECT statements.\"\"\"\nfrom collections import defaultdict\nfrom typing import Dict, Generator, List, NamedTuple, Optional, Union\n\nfrom cached_property import cached_property\n\nfrom sqlfluff.core.dialects.common import AliasInfo\nfrom sqlfluff.core.dialects.base import Dialect\nfrom sqlfluff.core.parser.segments.base import BaseSegment\nfrom sqlfluff.core.rules.analysis.select import get_select_statement_info\n\n\nclass WildcardInfo(NamedTuple):\n \"\"\"Structure returned by SelectCrawler.get_wildcard_info().\"\"\"\n\n segment: BaseSegment\n tables: List[str]\n\n\nclass SelectCrawler:\n \"\"\"Class for recursive dependency analysis related to SELECT statements.\n\n This class is a wrapper for select.get_select_statement_info(), but it adds\n recursive dependency walking.\n \"\"\"\n\n @classmethod\n def gather(\n cls, segment: BaseSegment, dialect: Dialect\n ) -> Dict[Optional[str], List[\"SelectCrawler\"]]:\n \"\"\"Find top-level SELECTs and CTEs, return info.\"\"\"\n queries = defaultdict(list)\n # We specify recurse_into=False because we only want top-level select\n # statmeents and CTEs. We'll deal with nested selects later as needed,\n # when processing their top-level parent.\n for select_statement in segment.recursive_crawl(\n \"select_statement\", recurse_into=False\n ):\n select_name = cls._get_name_if_cte(select_statement, segment)\n queries[select_name].append(SelectCrawler(select_statement, dialect))\n return dict(queries)\n\n @classmethod\n def get(\n cls,\n segment: BaseSegment,\n queries: Dict[str, List[\"SelectCrawler\"]],\n dialect: Dialect,\n ) -> Union[str, List[\"SelectCrawler\"]]:\n \"\"\"Find SELECTs, table refs, or value table function calls in segment.\n\n If we find a SELECT, return info list. 
Otherwise, return table name\n or function call string.\n \"\"\"\n for o in cls.crawl(segment, queries, dialect, False):\n return o\n assert False, \"Should be unreachable\"\n\n @classmethod\n def crawl(\n cls,\n segment: BaseSegment,\n queries: Dict[str, List[\"SelectCrawler\"]],\n dialect: Dialect,\n recurse_into=True,\n ) -> Generator[Union[str, List[\"SelectCrawler\"]], None, None]:\n \"\"\"Find SELECTs, table refs, or value table function calls in segment.\n\n For each SELECT, yield a list of SelectCrawlers. As we find table\n references or function call strings, yield those.\n \"\"\"\n buff = []\n for seg in segment.recursive_crawl(\n \"table_reference\", \"select_statement\", recurse_into=recurse_into\n ):\n if seg is segment:\n # If we are starting with a select_statement, recursive_crawl()\n # returns the statement itself. Skip that.\n continue\n\n if seg.is_type(\"table_reference\"):\n if not seg.is_qualified() and seg.raw in queries:\n # It's a CTE.\n # :TRICKY: Pop the CTE from \"queries\" to help callers avoid\n # infinite recursion. We could make this behavior optional\n # someday, if necessary.\n yield queries.pop(seg.raw)\n else:\n # It's an external table.\n yield seg.raw\n else:\n assert seg.is_type(\"select_statement\")\n buff.append(SelectCrawler(seg, dialect))\n if not buff:\n # If we reach here, the SELECT may be querying from a value table\n # function, e.g. UNNEST(). For our purposes, this is basically the\n # same as an external table. Return the \"table\" part as a string.\n table_expr = segment.get_child(\"table_expression\")\n if table_expr:\n yield table_expr.raw\n yield buff\n\n def __init__(self, select_statement, dialect):\n self.select_statement = select_statement\n self.dialect = dialect\n\n @cached_property\n def select_info(self):\n \"\"\"Returns SelectStatementColumnsAndTables on the SELECT.\"\"\"\n result = get_select_statement_info(\n self.select_statement, self.dialect, early_exit=False\n )\n return result\n\n def find_alias(self, table: str) -> Optional[AliasInfo]:\n \"\"\"Find corresponding table_aliases entry (if any) matching \"table\".\"\"\"\n alias_info = [\n t\n for t in self.select_info.table_aliases\n if t.aliased and t.ref_str == table\n ]\n assert len(alias_info) <= 1\n return alias_info[0] if alias_info else None\n\n def get_wildcard_info(self) -> List[WildcardInfo]:\n \"\"\"Find wildcard (*) targets in the SELECT.\"\"\"\n buff = []\n for seg in self.select_info.select_targets:\n if seg.get_child(\"wildcard_expression\"):\n if \".\" in seg.raw:\n # The wildcard specifies a target table.\n table = seg.raw.rsplit(\".\", 1)[0]\n buff.append(WildcardInfo(seg, [table]))\n else:\n # The wildcard is unqualified (i.e. does not specify a\n # table). This means to include all columns from all the\n # tables in the query.\n buff.append(\n WildcardInfo(\n seg,\n [\n alias_info.ref_str\n if alias_info.aliased\n else alias_info.from_expression_element.raw\n for alias_info in self.select_info.table_aliases\n ],\n )\n )\n return buff\n\n @staticmethod\n def _get_name_if_cte(\n select_statement: BaseSegment, ancestor_segment: BaseSegment\n ) -> Optional[str]:\n \"\"\"Return name if CTE. 
If top-level, return None.\"\"\"\n cte = None\n path_to = ancestor_segment.path_to(select_statement)\n for seg in path_to:\n if seg.is_type(\"common_table_expression\"):\n cte = seg\n break\n select_name = cte.segments[0].raw if cte else None\n return select_name\n","sub_path":"src/sqlfluff/core/rules/analysis/select_crawler.py","file_name":"select_crawler.py","file_ext":"py","file_size_in_byte":6359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"34360736","text":"#\n# Base interface class\n#\n\nimport pybamm\n\n\nclass BaseInterface(pybamm.BaseSubModel):\n \"\"\"\n Base class for interfacial currents\n\n Parameters\n ----------\n param : parameter class\n The parameters to use for this submodel\n domain : str\n The domain to implement the model, either: 'Negative' or 'Positive'.\n reaction : str\n The name of the reaction being implemented\n\n **Extends:** :class:`pybamm.BaseSubModel`\n \"\"\"\n\n def __init__(self, param, domain, reaction):\n super().__init__(param, domain)\n if reaction == \"lithium-ion main\":\n self.reaction_name = \"\" # empty reaction name for the main reaction\n self.Reaction_icd = \"Interfacial current density\"\n elif reaction == \"lead-acid main\":\n self.reaction_name = \"\" # empty reaction name for the main reaction\n self.Reaction_icd = \"Interfacial current density\"\n elif reaction == \"lead-acid oxygen\":\n self.reaction_name = \" oxygen\"\n self.Reaction_icd = \"Oxygen interfacial current density\"\n elif reaction == \"lithium-ion oxygen\":\n self.reaction_name = \" oxygen\"\n self.Reaction_icd = \"Oxygen interfacial current density\"\n elif reaction == \"SEI\":\n self.reaction_name = \" SEI\"\n self.Reaction_icd = \"SEI interfacial current density\"\n elif reaction == \"lithium plating\":\n self.reaction_name = \" lithium plating\"\n self.Reaction_icd = \"Lithium plating interfacial current density\"\n self.reaction = reaction\n\n def _get_exchange_current_density(self, variables):\n \"\"\"\n A private function to obtain the exchange current density\n\n Parameters\n ----------\n variables: dict\n The variables in the full model.\n\n Returns\n -------\n j0 : :class: `pybamm.Symbol`\n The exchange current density.\n \"\"\"\n c_e = variables[self.domain + \" electrolyte concentration\"]\n T = variables[self.domain + \" electrode temperature\"]\n\n if self.reaction == \"lithium-ion main\":\n c_s_surf = variables[self.domain + \" particle surface concentration\"]\n\n # If variable was broadcast, take only the orphan\n if (\n isinstance(c_s_surf, pybamm.Broadcast)\n and isinstance(c_e, pybamm.Broadcast)\n and isinstance(T, pybamm.Broadcast)\n ):\n c_s_surf = c_s_surf.orphans[0]\n c_e = c_e.orphans[0]\n T = T.orphans[0]\n if self.domain == \"Negative\":\n j0 = self.param.j0_n(c_e, c_s_surf, T) / self.param.C_r_n\n elif self.domain == \"Positive\":\n j0 = (\n self.param.gamma_p\n * self.param.j0_p(c_e, c_s_surf, T)\n / self.param.C_r_p\n )\n\n elif self.reaction == \"lead-acid main\":\n # If variable was broadcast, take only the orphan\n if isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast):\n c_e = c_e.orphans[0]\n T = T.orphans[0]\n if self.domain == \"Negative\":\n j0 = self.param.j0_n(c_e, T)\n elif self.domain == \"Positive\":\n j0 = self.param.j0_p(c_e, T)\n\n elif self.reaction == \"lead-acid oxygen\":\n # If variable was broadcast, take only the orphan\n if isinstance(c_e, pybamm.Broadcast) and isinstance(T, pybamm.Broadcast):\n c_e = c_e.orphans[0]\n T = T.orphans[0]\n if 
self.domain == \"Negative\":\n j0 = pybamm.Scalar(0)\n elif self.domain == \"Positive\":\n j0 = self.param.j0_p_Ox(c_e, T)\n else:\n j0 = pybamm.Scalar(0)\n\n return j0\n\n def _get_open_circuit_potential(self, variables):\n \"\"\"\n A private function to obtain the open circuit potential and entropic change\n\n Parameters\n ----------\n variables: dict\n The variables in the full model.\n\n Returns\n -------\n ocp : :class:`pybamm.Symbol`\n The open-circuit potential\n dUdT : :class:`pybamm.Symbol`\n The entropic change in open-circuit potential due to temperature\n\n \"\"\"\n\n if self.reaction == \"lithium-ion main\":\n c_s_surf = variables[self.domain + \" particle surface concentration\"]\n T = variables[self.domain + \" electrode temperature\"]\n\n # If variable was broadcast, take only the orphan\n if isinstance(c_s_surf, pybamm.Broadcast) and isinstance(\n T, pybamm.Broadcast\n ):\n c_s_surf = c_s_surf.orphans[0]\n T = T.orphans[0]\n\n if self.domain == \"Negative\":\n ocp = self.param.U_n(c_s_surf, T)\n dUdT = self.param.dUdT_n(c_s_surf)\n elif self.domain == \"Positive\":\n ocp = self.param.U_p(c_s_surf, T)\n dUdT = self.param.dUdT_p(c_s_surf)\n elif self.reaction == \"lead-acid main\":\n c_e = variables[self.domain + \" electrolyte concentration\"]\n # If c_e was broadcast, take only the orphan\n if isinstance(c_e, pybamm.Broadcast):\n c_e = c_e.orphans[0]\n if self.domain == \"Negative\":\n ocp = self.param.U_n(c_e, self.param.T_init)\n elif self.domain == \"Positive\":\n ocp = self.param.U_p(c_e, self.param.T_init)\n dUdT = pybamm.Scalar(0)\n\n elif self.reaction == \"lead-acid oxygen\":\n if self.domain == \"Negative\":\n ocp = self.param.U_n_Ox\n elif self.domain == \"Positive\":\n ocp = self.param.U_p_Ox\n dUdT = pybamm.Scalar(0)\n\n else:\n ocp = pybamm.Scalar(0)\n dUdT = pybamm.Scalar(0)\n\n return ocp, dUdT\n\n def _get_number_of_electrons_in_reaction(self):\n \"\"\"Returns the number of electrons in the reaction.\"\"\"\n if self.reaction in [\"lead-acid main\", \"lithium-ion main\"]:\n if self.domain == \"Negative\":\n return self.param.ne_n\n elif self.domain == \"Positive\":\n return self.param.ne_p\n elif self.reaction == \"lead-acid oxygen\":\n return self.param.ne_Ox\n else:\n return pybamm.Scalar(0)\n\n def _get_electrolyte_reaction_signed_stoichiometry(self):\n \"\"\"Returns the number of electrons in the reaction.\"\"\"\n if self.reaction in [\"lithium-ion main\", \"SEI\", \"lithium plating\"]:\n # Both the main reaction current contribute to the electrolyte reaction\n # current\n return pybamm.Scalar(1), pybamm.Scalar(1)\n elif self.reaction == \"lead-acid main\":\n return self.param.s_plus_n_S, self.param.s_plus_p_S\n elif self.reaction == \"lead-acid oxygen\":\n return self.param.s_plus_Ox, self.param.s_plus_Ox\n else:\n return pybamm.Scalar(0), pybamm.Scalar(0)\n\n def _get_delta_phi(self, variables):\n \"\"\"Calculate delta_phi, and derived variables, using phi_s and phi_e.\"\"\"\n phi_s = variables[self.domain + \" electrode potential\"]\n phi_e = variables[self.domain + \" electrolyte potential\"]\n delta_phi = phi_s - phi_e\n variables.update(\n self._get_standard_surface_potential_difference_variables(delta_phi)\n )\n return variables\n\n def _get_average_total_interfacial_current_density(self, variables):\n \"\"\"\n Method to obtain the average total interfacial current density.\n\n Note: for lithium-ion models this is only exact if all the particles have\n the same radius. 
For the current set of models implemeted in pybamm,\n having the radius as a function of through-cell distance only makes sense\n for the DFN model. In the DFN, the correct average interfacial current density\n is computed in 'base_kinetics.py' by averaging the actual interfacial current\n density. The approximation here is only used to get the approximate constant\n additional resistance term for the \"average\" SEI film resistance model\n (if using), where only negligible errors will be introduced.\n\n For \"leading-order\" and \"composite\" submodels (as used in the SPM and SPMe)\n there is only a single particle radius, so this method returns correct result.\n \"\"\"\n\n i_boundary_cc = variables[\"Current collector current density\"]\n\n if self.domain == \"Negative\":\n j_total_average = i_boundary_cc / self.param.l_n\n\n elif self.domain == \"Positive\":\n j_total_average = -i_boundary_cc / self.param.l_p\n\n return j_total_average\n\n def _get_standard_interfacial_current_variables(self, j):\n\n i_typ = self.param.i_typ\n L_x = self.param.L_x\n if self.domain == \"Negative\":\n j_scale = i_typ / (self.param.a_n_typ * L_x)\n elif self.domain == \"Positive\":\n j_scale = i_typ / (self.param.a_p_typ * L_x)\n\n # Average, and broadcast if necessary\n if j.domain == []:\n j_av = j\n j = pybamm.FullBroadcast(j, self.domain_for_broadcast, \"current collector\")\n elif j.domain == [\"current collector\"]:\n j_av = j\n j = pybamm.PrimaryBroadcast(j, self.domain_for_broadcast)\n else:\n j_av = pybamm.x_average(j)\n\n variables = {\n self.domain\n + \" electrode\"\n + self.reaction_name\n + \" interfacial current density\": j,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode\"\n + self.reaction_name\n + \" interfacial current density\": j_av,\n self.domain\n + \" electrode\"\n + self.reaction_name\n + \" interfacial current density [A.m-2]\": j_scale * j,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode\"\n + self.reaction_name\n + \" interfacial current density [A.m-2]\": j_scale * j_av,\n self.domain\n + \" electrode\"\n + self.reaction_name\n + \" interfacial current density per volume [A.m-3]\": i_typ / L_x * j,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode\"\n + self.reaction_name\n + \" interfacial current density per volume [A.m-3]\": i_typ / L_x * j_av,\n }\n\n return variables\n\n def _get_standard_total_interfacial_current_variables(self, j_tot_av):\n\n i_typ = self.param.i_typ\n L_x = self.param.L_x\n if self.domain == \"Negative\":\n j_scale = i_typ / (self.param.a_n_typ * L_x)\n elif self.domain == \"Positive\":\n j_scale = i_typ / (self.param.a_p_typ * L_x)\n\n variables = {\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode total interfacial current density\": j_tot_av,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode total interfacial current density [A.m-2]\": j_scale\n * j_tot_av,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode total interfacial current density per volume [A.m-3]\": i_typ\n / L_x\n * j_tot_av,\n }\n\n return variables\n\n def _get_standard_whole_cell_interfacial_current_variables(self, variables):\n \"\"\"\n Get variables associated with interfacial current over the whole cell domain\n This function also automatically increments the \"total source term\" variables\n \"\"\"\n i_typ = self.param.i_typ\n L_x = self.param.L_x\n j_n_scale = i_typ / (self.param.a_n_typ * L_x)\n j_p_scale = i_typ / (self.param.a_p_typ * L_x)\n\n j_n_av = variables[\n \"X-averaged negative electrode\"\n + 
self.reaction_name\n + \" interfacial current density\"\n ]\n j_p_av = variables[\n \"X-averaged positive electrode\"\n + self.reaction_name\n + \" interfacial current density\"\n ]\n\n j_n = variables[\n \"Negative electrode\" + self.reaction_name + \" interfacial current density\"\n ]\n j_s = pybamm.FullBroadcast(0, \"separator\", \"current collector\")\n j_p = variables[\n \"Positive electrode\" + self.reaction_name + \" interfacial current density\"\n ]\n j = pybamm.Concatenation(j_n, j_s, j_p)\n j_dim = pybamm.Concatenation(j_n_scale * j_n, j_s, j_p_scale * j_p)\n\n variables.update(\n {\n self.Reaction_icd: j,\n self.Reaction_icd + \" [A.m-2]\": j_dim,\n self.Reaction_icd + \" per volume [A.m-3]\": i_typ / L_x * j,\n }\n )\n\n a_n = variables[\"Negative electrode surface area to volume ratio\"]\n a_p = variables[\"Positive electrode surface area to volume ratio\"]\n a = pybamm.Concatenation(\n a_n, pybamm.FullBroadcast(0, \"separator\", \"current collector\"), a_p\n )\n\n s_n, s_p = self._get_electrolyte_reaction_signed_stoichiometry()\n s = pybamm.Concatenation(\n pybamm.FullBroadcast(s_n, \"negative electrode\", \"current collector\"),\n pybamm.FullBroadcast(0, \"separator\", \"current collector\"),\n pybamm.FullBroadcast(s_p, \"positive electrode\", \"current collector\"),\n )\n\n variables[\"Sum of electrolyte reaction source terms\"] += a * s * j\n variables[\"Sum of negative electrode electrolyte reaction source terms\"] += (\n a_n * s_n * j_n\n )\n variables[\"Sum of positive electrode electrolyte reaction source terms\"] += (\n a_p * s_p * j_p\n )\n variables[\n \"Sum of x-averaged negative electrode electrolyte reaction source terms\"\n ] += pybamm.x_average(a_n * s_n * j_n)\n variables[\n \"Sum of x-averaged positive electrode electrolyte reaction source terms\"\n ] += pybamm.x_average(a_p * s_p * j_p)\n\n variables[\"Sum of interfacial current densities\"] += j\n variables[\"Sum of negative electrode interfacial current densities\"] += j_n\n variables[\"Sum of positive electrode interfacial current densities\"] += j_p\n variables[\n \"Sum of x-averaged negative electrode interfacial current densities\"\n ] += j_n_av\n variables[\n \"Sum of x-averaged positive electrode interfacial current densities\"\n ] += j_p_av\n\n return variables\n\n def _get_standard_exchange_current_variables(self, j0):\n\n i_typ = self.param.i_typ\n L_x = self.param.L_x\n if self.domain == \"Negative\":\n j_scale = i_typ / (self.param.a_n_typ * L_x)\n elif self.domain == \"Positive\":\n j_scale = i_typ / (self.param.a_p_typ * L_x)\n\n # Average, and broadcast if necessary\n if j0.domain == []:\n j0_av = j0\n j0 = pybamm.FullBroadcast(\n j0, self.domain_for_broadcast, \"current collector\"\n )\n elif j0.domain == [\"current collector\"]:\n j0_av = j0\n j0 = pybamm.PrimaryBroadcast(j0, self.domain_for_broadcast)\n else:\n j0_av = pybamm.x_average(j0)\n\n variables = {\n self.domain\n + \" electrode\"\n + self.reaction_name\n + \" exchange current density\": j0,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode\"\n + self.reaction_name\n + \" exchange current density\": j0_av,\n self.domain\n + \" electrode\"\n + self.reaction_name\n + \" exchange current density [A.m-2]\": j_scale * j0,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode\"\n + self.reaction_name\n + \" exchange current density [A.m-2]\": j_scale * j0_av,\n self.domain\n + \" electrode\"\n + self.reaction_name\n + \" exchange current density per volume [A.m-3]\": i_typ / L_x * j0,\n \"X-averaged \"\n + 
self.domain.lower()\n + \" electrode\"\n + self.reaction_name\n + \" exchange current density per volume [A.m-3]\": i_typ / L_x * j0_av,\n }\n\n return variables\n\n def _get_standard_whole_cell_exchange_current_variables(self, variables):\n\n i_typ = self.param.i_typ\n L_x = self.param.L_x\n j_n_scale = i_typ / (self.param.a_n_typ * L_x)\n j_p_scale = i_typ / (self.param.a_p_typ * L_x)\n\n j0_n = variables[\n \"Negative electrode\" + self.reaction_name + \" exchange current density\"\n ]\n j0_s = pybamm.FullBroadcast(0, \"separator\", \"current collector\")\n j0_p = variables[\n \"Positive electrode\" + self.reaction_name + \" exchange current density\"\n ]\n j0 = pybamm.Concatenation(j0_n, j0_s, j0_p)\n j0_dim = pybamm.Concatenation(j_n_scale * j0_n, j0_s, j_p_scale * j0_p)\n\n if self.reaction_name == \"\":\n variables = {\n \"Exchange current density\": j0,\n \"Exchange current density [A.m-2]\": j0_dim,\n \"Exchange current density per volume [A.m-3]\": i_typ / L_x * j0,\n }\n else:\n reaction_name = self.reaction_name[1:].capitalize()\n variables = {\n reaction_name + \" exchange current density\": j0,\n reaction_name + \" exchange current density [A.m-2]\": j0_dim,\n reaction_name\n + \" exchange current density per volume [A.m-3]\": i_typ / L_x * j0,\n }\n\n return variables\n\n def _get_standard_overpotential_variables(self, eta_r):\n\n pot_scale = self.param.potential_scale\n # Average, and broadcast if necessary\n eta_r_av = pybamm.x_average(eta_r)\n if eta_r.domain == []:\n eta_r = pybamm.FullBroadcast(\n eta_r, self.domain_for_broadcast, \"current collector\"\n )\n elif eta_r.domain == [\"current collector\"]:\n eta_r = pybamm.PrimaryBroadcast(eta_r, self.domain_for_broadcast)\n\n variables = {\n self.domain\n + \" electrode\"\n + self.reaction_name\n + \" reaction overpotential\": eta_r,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode\"\n + self.reaction_name\n + \" reaction overpotential\": eta_r_av,\n self.domain\n + \" electrode\"\n + self.reaction_name\n + \" reaction overpotential [V]\": eta_r * pot_scale,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode\"\n + self.reaction_name\n + \" reaction overpotential [V]\": eta_r_av * pot_scale,\n }\n\n return variables\n\n def _get_standard_sei_film_overpotential_variables(self, eta_sei):\n\n pot_scale = self.param.potential_scale\n # Average, and broadcast if necessary\n eta_sei_av = pybamm.x_average(eta_sei)\n if eta_sei.domain == []:\n eta_sei = pybamm.FullBroadcast(\n eta_sei, self.domain_for_broadcast, \"current collector\"\n )\n elif eta_sei.domain == [\"current collector\"]:\n eta_sei = pybamm.PrimaryBroadcast(eta_sei, self.domain_for_broadcast)\n\n domain = self.domain.lower() + \" electrode\"\n variables = {\n self.domain + \" electrode SEI film overpotential\": eta_sei,\n \"X-averaged \" + domain + \" SEI film overpotential\": eta_sei_av,\n self.domain + \" electrode SEI film overpotential [V]\": eta_sei * pot_scale,\n \"X-averaged \"\n + domain\n + \" SEI film overpotential [V]\": eta_sei_av * pot_scale,\n }\n\n return variables\n\n def _get_standard_surface_potential_difference_variables(self, delta_phi):\n\n if self.domain == \"Negative\":\n ocp_ref = self.param.U_n_ref\n elif self.domain == \"Positive\":\n ocp_ref = self.param.U_p_ref\n pot_scale = self.param.potential_scale\n\n # Average, and broadcast if necessary\n if delta_phi.domain == []:\n delta_phi_av = delta_phi\n delta_phi_av_dim = ocp_ref + delta_phi_av * pot_scale\n delta_phi = pybamm.FullBroadcast(\n delta_phi_av, 
self.domain_for_broadcast, \"current collector\"\n )\n delta_phi_dim = pybamm.FullBroadcast(\n delta_phi_av_dim, self.domain_for_broadcast, \"current collector\"\n )\n elif delta_phi.domain == [\"current collector\"]:\n delta_phi_av = delta_phi\n delta_phi_av_dim = ocp_ref + delta_phi * pot_scale\n delta_phi = pybamm.PrimaryBroadcast(delta_phi_av, self.domain_for_broadcast)\n delta_phi_dim = pybamm.PrimaryBroadcast(\n delta_phi_av_dim, self.domain_for_broadcast\n )\n else:\n delta_phi_av = pybamm.x_average(delta_phi)\n delta_phi_av_dim = ocp_ref + delta_phi_av * pot_scale\n delta_phi_dim = ocp_ref + delta_phi * pot_scale\n\n variables = {\n self.domain + \" electrode surface potential difference\": delta_phi,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode surface potential difference\": delta_phi_av,\n self.domain + \" electrode surface potential difference [V]\": delta_phi_dim,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode surface potential difference [V]\": delta_phi_av_dim,\n }\n\n return variables\n\n def _get_standard_ocp_variables(self, ocp, dUdT):\n \"\"\"\n A private function to obtain the open circuit potential and\n related standard variables.\n\n Parameters\n ----------\n ocp : :class:`pybamm.Symbol`\n The open-circuit potential\n dUdT : :class:`pybamm.Symbol`\n The entropic change in ocp\n\n Returns\n -------\n variables : dict\n The variables dictionary including the open circuit potentials\n and related standard variables.\n \"\"\"\n\n # Average, and broadcast if necessary\n if ocp.domain == []:\n ocp_av = ocp\n ocp = pybamm.FullBroadcast(\n ocp, self.domain_for_broadcast, \"current collector\"\n )\n elif ocp.domain == [\"current collector\"]:\n ocp_av = ocp\n ocp = pybamm.PrimaryBroadcast(ocp, self.domain_for_broadcast)\n else:\n ocp_av = pybamm.x_average(ocp)\n dUdT_av = pybamm.x_average(dUdT)\n\n if self.domain == \"Negative\":\n ocp_dim = self.param.U_n_ref + self.param.potential_scale * ocp\n ocp_av_dim = self.param.U_n_ref + self.param.potential_scale * ocp_av\n elif self.domain == \"Positive\":\n ocp_dim = self.param.U_p_ref + self.param.potential_scale * ocp\n ocp_av_dim = self.param.U_p_ref + self.param.potential_scale * ocp_av\n\n variables = {\n self.domain\n + \" electrode\"\n + self.reaction_name\n + \" open circuit potential\": ocp,\n self.domain\n + \" electrode\"\n + self.reaction_name\n + \" open circuit potential [V]\": ocp_dim,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode\"\n + self.reaction_name\n + \" open circuit potential\": ocp_av,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode\"\n + self.reaction_name\n + \" open circuit potential [V]\": ocp_av_dim,\n }\n if self.reaction_name == \"\":\n variables.update(\n {\n self.domain + \" electrode entropic change\": dUdT,\n \"X-averaged \"\n + self.domain.lower()\n + \" electrode entropic change\": dUdT_av,\n }\n )\n\n return variables\n","sub_path":"pybamm/models/submodels/interface/base_interface.py","file_name":"base_interface.py","file_ext":"py","file_size_in_byte":24244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"637474546","text":"#!/usr/bin/env python\nimport pmag\ndat=open('princ.di','rU').readlines()\nx,y,z=[],[],[]\nDirs=[]\nfor line in dat:\n rec=line.split()\n Dirs=[float(rec[0]),float(rec[1]),float(rec[2])*1e-4]\n cart=pmag.dir2cart(Dirs)\n x.append(cart[0])\n y.append(cart[1]*1.1)\n z.append(cart[2])\nfrom enthought.mayavi.mlab import points3d,savefig,show,outline,plot3d\nfrom numpy 
import array,sum,transpose\nfrom numpy.linalg import eig\nX,Y,Z=array(x),array(y),array(z)\npoints3d(X,Y,Z,color=(0,0,0),scale_factor=0.25,opacity=.5)\noutline(color=(.7,0,0))\nT=array([[sum(X*X),sum(X*Y),sum(X*Z)],\n [sum(Y*X),sum(Y*Y),sum(Y*Z)],\n [sum(Z*X),sum(Z*Y),sum(Z*Z)]])\nvals,vects=eig(T)\npv=transpose(vects)[0]*3.\nplot3d([pv[0],-pv[0]],[pv[1],-pv[1]],[pv[2],-pv[2]],tube_radius=0.1,color=(0,1,0))\nshow()\n\n","sub_path":"EPSFiles/plot3d.py","file_name":"plot3d.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"423814313","text":"import os\n\n\ndef calculate_orbits(tree, root):\n if root == None:\n return 0, 0\n if root not in tree:\n # leaf node if no children exist\n return 1, 0\n count = 0\n sum = 0\n print(f'child = {tree[root]}')\n for child in tree[root]:\n cur_count, cur_sum = calculate_orbits(tree, child)\n count += cur_count\n sum += cur_sum\n return count+1, sum + count\n\n\n# build graph using dict implementation with inputs\ntree = {}\ndirect_orbits = 0\nfilepath = f'{os.getcwd()}/Q6/input.txt'\nwith open(filepath, 'r') as f:\n data = f.read().splitlines()\n for orbit in data:\n a, b = orbit.split(')')\n if a not in tree:\n tree[a] = []\n tree[a].append(b)\n\n direct_orbits += 1\n\ncount, num_orbits = calculate_orbits(tree, \"COM\")\nprint(num_orbits)\n","sub_path":"Q6/Q6P1.py","file_name":"Q6P1.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"276472233","text":"\r\ndef count_words(filename):\r\n try:\r\n with open(filename, encoding = 'utf-8') as f_obj:\r\n contents = f_obj.read()\r\n except FileNotFoundError:\r\n msg = \"Sorry, the file \" + filename + \" does not exist.\"\r\n print(msg)\r\n else:\r\n words = contents.split()\r\n num = len(words)\r\n print(num)\r\n\r\nfilename = 'D:\\Workspace\\python\\py_learning\\Alice.txt'\r\ncount_words(filename)","sub_path":"py37/src/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"303627441","text":"\nfrom funcx.sdk.client import FuncXClient\n\nfxc = FuncXClient()\n\nlocation = '039706667969.dkr.ecr.us-east-1.amazonaws.com/xtract-bert'\ndescription = 'Xtract text types container using pretrained BERT NER model'\ncontainer_type = 'docker'\nname = \"xtract/bert\"\ncontainer_uuid = fxc.register_container(name, location, description, container_type)\n\n\ndef bert_extract(event):\n import io\n import os\n import sys\n import time\n import pickle\n import tempfile\n\n from googleapiclient.discovery import build\n from googleapiclient.http import MediaIoBaseDownload\n from google_auth_oauthlib.flow import InstalledAppFlow\n from google.auth.transport.requests import Request\n\n t0 = time.time()\n\n try:\n sys.path.insert(1, '/')\n from xtract_bert_main import load_model, extract_text_metadata\n\n t_model_load_start = time.time()\n # TODO: multiproc (load both at once in background while downloading files).\n bert_model = load_model('bert')\n w2v_model = load_model('w2v')\n\n t_model_load_end = time.time()\n\n model_loading_time = t_model_load_end - t_model_load_start\n\n except Exception as e:\n return e\n\n # A list of file paths\n all_families = event['inputs']\n dir_name = tempfile.mkdtemp()\n os.chdir(dir_name)\n\n def generate_drive_connection(creds):\n service = build('drive', 'v3', credentials=creds)\n return 
service\n\n all_results = []\n for family in all_families:\n # TODO: BREAK INTO DOWNLOAD PHASE, THEN PROCESS PHASE.\n\n d_type = \"GDRIVE\"\n\n new_mdata = None\n if d_type is \"GDRIVE\":\n\n creds = pickle.loads(event[\"gdrive_pkl\"])[0]\n file_id = event[\"file_id\"]\n is_gdoc = event[\"is_gdoc\"]\n mimeType = \"text/csv\"\n try:\n service = generate_drive_connection(creds)\n ta = time.time()\n\n try:\n if is_gdoc:\n request = service.files().export(fileId=file_id, mimeType=mimeType)\n else:\n request = service.files().get_media(fileId=file_id)\n except Exception as e:\n return f\"[Xtract] Was unable to launch Google service request: {e}\"\n\n fh = io.FileIO(file_id, 'wb')\n downloader = MediaIoBaseDownload(fh, request)\n except Exception as e:\n return f\"[Xtract] Unable to perform MediaBaseDownload: {e}\"\n\n try:\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n print(\"Download %d%%.\" % int(status.progress() * 100))\n except Exception as e:\n return e\n\n tb = time.time()\n\n try:\n bert_mdata = extract_text_metadata(file_id, 'bert', bert_model)\n w2v_mdata = extract_text_metadata(file_id, 'w2v', w2v_model)\n all_results.append({file_id: {'bert': bert_mdata, 'w2v': w2v_mdata}})\n except Exception as e:\n return e\n\n t1 = time.time()\n return {'metadata': all_results, 'tot_time': t1-t0, 'trans_time': tb-ta}\n","sub_path":"extractors/xtract_bert.py","file_name":"xtract_bert.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"245051843","text":"#-*- encoding: gb2312 -*-\n# @Time : 2018/8/7 15:44\n# @Author : zhangjixu\n# _*_ coding=gb2312 _*_\nimport email\n\nfp = open(\"D:\\\\chen.eml\", \"r\")\nmsg = email.message_from_file(fp)\n\nif __name__ == '__main__':\n # 循环信件中的每一个mime的数据块\n for par in msg.walk():\n if not par.is_multipart(): # 这里要判断是否是multipart,是的话,里面的数据是无用的,至于为什么可以了解mime相关知识。\n name = par.get_param(\"name\") # 如果是附件,这里就会取出附件的文件名\n if name:\n # 有附件\n # 下面的三行代码只是为了解码象=?gbk?Q?=CF=E0=C6=AC.rar?=这样的文件名\n h = email.Header.Header(name)\n dh = email.Header.decode_header(h)\n fname = dh[0][0]\n print('附件名:', fname)\n data = par.get_payload(decode=True) # 解码出附件数据,然后存储到文件中\n print(data)\n try:\n f = open(fname, 'wb') # 注意一定要用wb来打开文件,因为附件一般都是二进制文件\n except:\n print('附件名有非法字符,自动换一个')\n f = open('aaaa', 'wb')\n f.write(data)\n f.close()\n else:\n # 不是附件,是文本内容\n print(par.get_payload(decode=True)) # 解码出文本内容,直接输出来就可以了。\n","sub_path":"python/read_eml.py","file_name":"read_eml.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"267679596","text":"# coding=utf8\n\n# Copyright 2018 JDCLOUD.COM\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# NOTE: This class is auto generated by the jdcloud code generator program.\n\n\nclass DBInstance(object):\n\n def __init__(self, instanceId=None, instanceName=None, instanceType=None, engine=None, engineVersion=None, 
instanceClass=None, instanceStorageGB=None, instanceCPU=None, instanceMemoryMB=None, regionId=None, azId=None, vpcId=None, subnetId=None, instanceStatus=None, createTime=None, backupSynchronicity=None, charge=None, tags=None, sourceInstanceId=None, vpcName=None, dbUrl=None):\n \"\"\"\n :param instanceId: (Optional) 实例ID\n :param instanceName: (Optional) 实例名称,具体规则可参见帮助中心文档:[名称及密码限制](../../../documentation/Database-and-Cache-Service/RDS/Introduction/Restrictions/SQLServer-Restrictions.md)\n :param instanceType: (Optional) 实例类别,例如主实例,只读实例等,参见[枚举参数定义](../Enum-Definitions/Enum-Definitions.md)\n :param engine: (Optional) 实例引擎类型,如MySQL或SQL Server等,参见[枚举参数定义](../Enum-Definitions/Enum-Definitions.md)\n :param engineVersion: (Optional) 实例引擎版本,参见[枚举参数定义](../Enum-Definitions/Enum-Definitions.md)\n :param instanceClass: (Optional) 实例规格代码\n :param instanceStorageGB: (Optional) 磁盘,单位GB\n :param instanceCPU: (Optional) CPU核数\n :param instanceMemoryMB: (Optional) 内存,单位MB\n :param regionId: (Optional) 地域ID,参见[地域及可用区对照表](../Enum-Definitions/Regions-AZ.md)\n :param azId: (Optional) 可用区ID,第一个为主实例在的可用区,参见[地域及可用区对照表](../Enum-Definitions/Regions-AZ.md)\n :param vpcId: (Optional) VPC的ID\n :param subnetId: (Optional) 子网的ID\n :param instanceStatus: (Optional) 实例状态,参见[枚举参数定义](../Enum-Definitions/Enum-Definitions.md)\n :param createTime: (Optional) 实例创建时间\n :param backupSynchronicity: (Optional) 实例跨地域备份服务开启相关信息\n :param charge: (Optional) 计费配置\n :param tags: (Optional) 标签信息\n :param sourceInstanceId: (Optional) MySQL只读实例对应的主实例ID\n :param vpcName: (Optional) vpc名称\n :param dbUrl: (Optional) DMS登陆数据库链接\n \"\"\"\n\n self.instanceId = instanceId\n self.instanceName = instanceName\n self.instanceType = instanceType\n self.engine = engine\n self.engineVersion = engineVersion\n self.instanceClass = instanceClass\n self.instanceStorageGB = instanceStorageGB\n self.instanceCPU = instanceCPU\n self.instanceMemoryMB = instanceMemoryMB\n self.regionId = regionId\n self.azId = azId\n self.vpcId = vpcId\n self.subnetId = subnetId\n self.instanceStatus = instanceStatus\n self.createTime = createTime\n self.backupSynchronicity = backupSynchronicity\n self.charge = charge\n self.tags = tags\n self.sourceInstanceId = sourceInstanceId\n self.vpcName = vpcName\n self.dbUrl = dbUrl\n","sub_path":"jdcloud_sdk/services/yunding/models/DBInstance.py","file_name":"DBInstance.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"551778041","text":"#Q.1- Create a function to calculate the area of a sphere by taking radius from user. 
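\r\n# surface-area formula: A = 4*pi*r**2 (math.pi would be more precise than the hard-coded 3.14)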
\r\ndef sphere(r):\r\n pi=3.14\r\n area=4*pi*r*r\r\n return area\r\nr=int(input(\"Enter the radius : \"))\r\nprint('Area of sphere is :',sphere(r))\r\n\r\n#Q.2-Prints All the Perfect Numbers Between 1 and 1000\r\nl=[]\r\ndef perfect(n):\r\n p=0\r\n \r\n for i in range(1,n):\r\n if n%i==0:\r\n p=p+i\r\n if p==n:\r\n l.append(p)\r\n return l\r\nfor j in range(1,1000):\r\n a=perfect(j)\r\n\r\nprint(a)\r\n\r\n#Q.3-Print Multiplication Table of a User Defined Number\r\nn=int(input('Enter the number:'))\r\nif n>=1:\r\n for x in range (1,11):\r\n y=n * x\r\n print(y)\r\n\r\n#Q.4-Write a function to calculate power of a number raised to other ( a^b ) using recursion.\r\n\r\ndef power(base,exp):\r\n if(exp==1):\r\n return(base)\r\n if(exp!=1):\r\n return(base*power(base,exp-1))\r\nbase=int(input(\"Enter base: \"))\r\nexp=int(input(\"Enter exponential value: \"))\r\nprint(\"Result:\",power(base,exp))\r\n","sub_path":"ass6.py","file_name":"ass6.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"171471987","text":"def sol(N, M, a):\n s, e = 0, 0\n subsum = a[0]\n cnt = 0\n while True:\n if subsum < M: # if the running sum is smaller, advance the end point\n e += 1\n if e >= N: # mind the index range\n break\n subsum += a[e]\n elif subsum == M: # if equal, increment cnt\n cnt += 1\n subsum -= a[s]\n s += 1\n else: # if larger, advance the start point\n subsum -= a[s]\n s += 1\n return cnt\n\n\nN, M = map(int, input().split())\na = list(map(int, input().split()))\nprint(sol(N, M, a))\n\n\n","sub_path":"src/baekjoon/2003.py","file_name":"2003.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"115478180","text":"\"\"\"\r\nCreated on 23 May 2017\r\n\r\n@author: Maria Schilstra\r\n\"\"\"\r\n#from PyQt5 import QtGui as gui\r\n#from PyQt5 import QtCore as qt\r\nimport numpy as np, copy as cp\r\nimport math\r\nfrom PyQt5 import QtWidgets as widgets\r\n\r\nfrom matplotlib.figure import Figure\r\nimport matplotlib.ticker as ticker\r\nimport matplotlib.gridspec as gridspec\r\nfrom matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg as FigureCanvas,\r\n NavigationToolbar2QT)\r\n\r\n\r\nclass MplCanvas(FigureCanvas):\r\n \"\"\" \r\n Class representing the FigureCanvas widget to be embedded in the GUI\r\n \"\"\"\r\n colour_seq = ['blue',\r\n 'green',\r\n 'red',\r\n 'orange',\r\n 'cyan',\r\n 'magenta',\r\n 'purple',\r\n 'brown',\r\n 'white',\r\n 'black'\r\n ] \r\n\r\n def __init__(self, parent):\r\n self.fig = Figure()\r\n \r\n self.gs = gridspec.GridSpec(10, 1) \r\n self.gs.update(left=0.15, right=0.95, top=0.95, bottom=0.1, hspace=5.0)\r\n self.data_plot = self.fig.add_subplot(self.gs[2:,:])\r\n self.data_res_plot = self.fig.add_subplot(self.gs[0:2,:], sharex=self.data_plot)\r\n FigureCanvas.__init__(self, self.fig)\r\n self.setParent(parent)\r\n FigureCanvas.setSizePolicy(self, widgets.QSizePolicy.Preferred, widgets.QSizePolicy.Preferred)\r\n FigureCanvas.updateGeometry(self) \r\n \r\n self.curve_colours = {}\r\n self.vline0, self.vline1 = None, None\r\n \r\n def on_move(self):\r\n pass\r\n\r\n def set_fig_annotations(self, ylabel=\"Value\", rlabel=\"Residuals\"):\r\n self.data_plot.set_ylabel(ylabel)\r\n self.data_res_plot.set_ylabel(rlabel)\r\n self.data_res_plot.locator_params(axis='y',nbins=4)\r\n \r\n def set_colours(self, series_names):\r\n self.curve_colours = {}\r\n for name in series_names:\r\n i = series_names.index(name) % len(self.colour_seq)\r\n self.curve_colours[name] = 
self.colour_seq[i]\r\n \r\n def get_series_colour(self, series_name):\r\n if self.series_in_plot(series_name):\r\n return self.curve_colours[series_name]\r\n return ''\r\n \r\n def series_in_plot(self, series_name):\r\n return series_name in self.curve_colours\r\n \r\n def has_vertical_lines(self):\r\n return self.vline0 is not None and self.vline1 is not None\r\n \r\n def get_vline_positions(self):\r\n if self.has_vertical_lines():\r\n x0 = self.vline0.get_x()\r\n x1 = self.vline1.get_x()\r\n if x1 < x0:\r\n return np.array([x1, x0])\r\n return np.array([x0, x1])\r\n return None\r\n \r\n def clear_plots(self):\r\n self.data_plot.cla()\r\n self.data_res_plot.cla()\r\n self.set_fig_annotations()\r\n self.fig.canvas.draw()\r\n \r\n def draw_series(self, series_name, x, y, kind, ):\r\n \"\"\"\r\n Draw a single curve.\r\n @series_name: series id (string, must be unique)\r\n @x: x-axis values (pandas series)\r\n @y: y-axis values (pandas series)\r\n @kind: 'primary', 'calculated', 'residuals'\r\n \"\"\"\r\n xdif = np.mean(np.diff(x))\r\n xspan = np.max(x) - np.min(x)\r\n if kind in ('primary', 'residuals'):\r\n marker = 'o'\r\n if xdif != 0.0:\r\n if xspan / xdif > 50: \r\n marker = '-'\r\n elif kind == 'calculated':\r\n marker = '--'\r\n if not self.series_in_plot(series_name):\r\n i = len(self.curve_colours.keys()) % len(self.colour_seq)\r\n self.curve_colours[series_name] = self.colour_seq[i]\r\n if kind in ('primary', 'calculated'):\r\n self.data_plot.plot(x, y, marker, color=self.curve_colours[series_name])\r\n if kind == 'residuals':\r\n self.data_res_plot.plot(x, y, marker, color=self.curve_colours[series_name])\r\n self.data_plot.ticklabel_format(style='sci', scilimits=(-3,3), axis='both') \r\n self.fig.canvas.draw()\r\n \r\n def set_vlines(self, x_limits=None):\r\n x_outer_limits = self.data_plot.get_xlim()\r\n if x_limits is None:\r\n x_limits = cp.deepcopy(x_outer_limits)\r\n if math.isclose(x_limits[0], x_limits[1]):\r\n x_limits = cp.deepcopy(x_outer_limits)\r\n# dx = abs((x_outer_limits[0] - x_limits[0]) / 100.0)\r\n# x_limits[0] = x_limits[0] - dx\r\n# x_limits[1] = x_limits[1] + dx\r\n self.vline0 = DraggableLine(self.data_plot.axvline(x_limits[0],\r\n lw=1, \r\n ls='--', \r\n color='k'), x_outer_limits)\r\n self.vline1 = DraggableLine(self.data_plot.axvline(x_limits[1],\r\n lw=1, \r\n ls='--', \r\n color='k'), x_outer_limits) \r\n self.fig.canvas.draw() \r\n \r\n\r\nclass NavigationToolbar(NavigationToolbar2QT):\r\n \r\n def __init__(self, canvas_, parent_):\r\n self.toolitems = tuple([t for t in NavigationToolbar2QT.toolitems if\r\n t[0] in ('Home', 'Back', 'Forward', 'Pan', 'Zoom', 'Save')])\r\n NavigationToolbar2QT.__init__(self,canvas_,parent_) \r\n \r\n def switch_off_pan_zoom(self):\r\n if self._active == \"PAN\":\r\n self.pan()\r\n elif self._active == \"ZOOM\":\r\n self.zoom()\r\n \r\nclass DraggableLine:\r\n \"\"\"\r\n Based on DraggableRectangle exercise in https://matplotlib.org/users/event_handling.html\r\n \"\"\"\r\n def __init__(self, line, xlims):\r\n self.line = line\r\n self.vline_xlims = xlims\r\n self.connect()\r\n self.press = None\r\n \r\n def get_x(self):\r\n return self.line.get_xdata()[0]\r\n \r\n def connect(self):\r\n 'connect to all the events we need'\r\n self.cidpress = self.line.figure.canvas.mpl_connect(\r\n 'button_press_event', self.on_press)\r\n self.cidrelease = self.line.figure.canvas.mpl_connect(\r\n 'button_release_event', self.on_release)\r\n self.cidmotion = self.line.figure.canvas.mpl_connect(\r\n 'motion_notify_event', 
self.on_motion)\r\n\r\n    def on_press(self, event):\r\n        'on button press we will see if the mouse is over us and store some data'\r\n        if event.inaxes == self.line.axes: \r\n            contained = self.line.contains(event)[0]\r\n            if contained:\r\n                self.press = event.xdata \r\n        return\r\n        \r\n    def on_motion(self, event):\r\n        'on motion we will move the line if the mouse is over us'\r\n        if self.press is None: \r\n            return\r\n        if event.inaxes != self.line.axes: \r\n            return\r\n        if event.xdata < self.vline_xlims[0] or event.xdata > self.vline_xlims[1]:\r\n            return\r\n        newx = np.ones_like(self.line.get_xdata()) * event.xdata\r\n        self.line.set_xdata(newx)\r\n        self.line.figure.canvas.draw()\r\n\r\n    def on_release(self, event):\r\n        'on release we reset the press data'\r\n        self.press = None\r\n        self.line.figure.canvas.draw()\r\n\r\n    def disconnect(self):\r\n        'disconnect all the stored connection ids'\r\n        self.line.figure.canvas.mpl_disconnect(self.cidpress)\r\n        self.line.figure.canvas.mpl_disconnect(self.cidrelease)\r\n        self.line.figure.canvas.mpl_disconnect(self.cidmotion)\r\n","sub_path":"src/koektrommel/crux_mpl.py","file_name":"crux_mpl.py","file_ext":"py","file_size_in_byte":7754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"348902049","text":"# patching version incompatibility between missingpy and sklearn ...\nimport sys\nimport sklearn.neighbors._base\n\n# need to add this function to sklearn.neighbors._base\n# since it got removed in sklearn 0.22.1\ndef _check_weights(weights):\n    \"\"\"Check to make sure weights are valid\"\"\"\n    if weights in (None, 'uniform', 'distance'):\n        return weights\n    elif callable(weights):\n        return weights\n    else:\n        raise ValueError(\"weights not recognized: should be 'uniform', \"\n                         \"'distance', or a callable function\")\n\nsklearn.neighbors._base._check_weights = _check_weights\nsys.modules[\"sklearn.neighbors.base\"] = sklearn.neighbors._base\n\nfrom missingpy import MissForest as MF\nfrom NAIVI import MICE\n\n\nclass MissForest(MICE):\n\n    def __init__(self, K, N, p_cts, p_bin):\n        super().__init__(K, N, p_cts, p_bin)\n        self.model = MF()","sub_path":"NAIVI/mf/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"352720151","text":"from django.db import models\n\nfrom django.contrib.auth import get_user_model\n\nfrom random import randint \n# Create your models here.\n\nUser = get_user_model()\n\nclass Cart(models.Model):\n    customer = models.ForeignKey(\n        User,\n        related_name='carts',\n        on_delete=models.PROTECT,\n        blank=True,\n        null=True,\n    )\n    created_date = models.DateTimeField(\n        \"Created date\",\n        auto_now=False,\n        auto_now_add=True,\n    )\n\n    def total_price(self):\n        price = 0\n        for book_in_cart in self.books.all():\n            price += book_in_cart.price\n        return price\n\nclass BookInCart(models.Model):\n    cart = models.ForeignKey(\n        Cart,\n        on_delete=models.CASCADE,\n        related_name='books'\n    )\n    book = models.ForeignKey(\n        'books.Book',\n        on_delete=models.PROTECT,\n        related_name='books_in_carts'\n    )\n    quantity = models.IntegerField(\n        \"Quantity\",\n        default=1\n    )\n    price = models.DecimalField(\n        \"Price\",\n        decimal_places=2,\n        max_digits=5\n    )\n\n    def __str__(self) -> str:\n        return f\"{self.book.name_book} in cart for {self.cart.customer.username}\"\n    def construct_price(self):\n        # store the computed price (the original expression discarded its result)\n        self.price = self.quantity * randint(25, 250)\n    
","sub_path":"src/orders/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"450180890","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom django.conf.urls import patterns, url\nfrom z_FileShow import views\n\nurlpatterns = [\n\t\turl(r'^show/$',views.show,name='show'),\n\t\turl(r'^File_detach/$',views.File_detach,name='File_detach'),\n\t\turl(r'^dele_file/$',views.dele_file,name='dele_file')\n\t]","sub_path":"z_FileShow/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"574305296","text":"# -*- coding: utf-8 -*-\nimport requests\nfrom scrapy import Spider, Request\nfrom scrapy.settings import Settings\nfrom scrapy.crawler import CrawlerRunner\nfrom twisted.internet import reactor\nfrom json import loads\n\nclass UserAgentSpider(Spider):\n \"\"\"Spider to test user agents\"\"\"\n name = 'user_agent'\n # this must be set to a truthy value in order for the middleware to take effect\n random_user_agent = True\n\n def start_requests(self):\n # send 5 requests to httpbin.org in order to retrieve the user-agent the page sees\n for i in range(0, 5):\n yield Request(\n 'https://httpbin.org/user-agent',\n self.parse,\n # disable duplication filter for scrapy requests\n dont_filter=True\n )\n\n def parse(self, response):\n # get user-agent from json string\n user_agent = loads(response.text)['user-agent']\n print(user_agent)\n yield {'user_agent': user_agent}\n\ndef get_settings() -> Settings:\n \"\"\"create and return a scrapy settings object\"\"\"\n settings = Settings()\n # load a list of user agents from http://51.158.74.109\n settings.set('USER_AGENTS', [x['useragent'] for x in requests.get('http://51.158.74.109/useragents/?format=json').json()])\n # enable the RandomUserAgentMiddleware middleware\n settings.set('DOWNLOADER_MIDDLEWARES', {\n 'middleware.RandomUserAgentMiddleware': 150,\n })\n return settings\n\nif __name__ == '__main__':\n # routine to run scrapy from a script\n # see: https://docs.scrapy.org/en/latest/topics/practices.html#run-scrapy-from-a-script\n settings = get_settings()\n runner = CrawlerRunner(settings)\n d = runner.crawl(UserAgentSpider)\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n","sub_path":"Middlewares/RandomUserAgent/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"225233853","text":"import numpy as np\nimport geopandas as gpd\nimport pandas as pd\nfrom shapely.geometry import Polygon\n\nfrom sentinelhub import CRS, transform_bbox, GeopediaFeatureIterator\nfrom eolearn.core import EOTask, EOPatch, FeatureType\nfrom skimage.morphology import disk, binary_dilation, binary_erosion\n\nfrom .utilities import get_slovenia_crop_geopedia_idx_to_crop_id_mapping\nfrom .utilities import get_austria_crop_geopedia_idx_to_crop_id_mapping\nfrom .utilities import get_danish_crop_geopedia_idx_to_crop_id_mapping\n\nclass ValidDataFractionPredicate:\n \"\"\"\n Predicate that defines if a frame from EOPatch's time-series is valid or not. 
Frame is valid if the\n    valid data fraction is above the specified threshold.\n    \"\"\"\n\n    def __init__(self, threshold):\n        self.threshold = threshold\n\n    def __call__(self, array):\n        coverage = np.sum(array.astype(np.uint8)) / np.prod(array.shape)\n        return coverage > self.threshold\n\n\nclass CreatePatch(EOTask):\n    \"\"\"Creates an empty EOPatch for a given BBOX.\"\"\"\n    \n    def execute(self, *, bbox):\n        \"\"\"Returns a newly created EOPatch for the given BBOX. \n        \n        :param bbox: specifies the bounding box of the EOPatch. Coordinates must be in\n            the specified coordinate reference system. Required.\n        :type bbox: BBox\n        :return: new empty EOPatch for the given BBOX.\n        :rtype: EOPatch\n        \"\"\" \n        eopatch = EOPatch()\n        eopatch.bbox = bbox\n        \n        return eopatch\n    \n\nclass FixLPIS(EOTask):\n    \"\"\"\n    Fixes known issues of LPIS data stored as a vector_timeless feature in the EOPatch.\n\n    Known issues depend on the country and are:\n    * Slovenia:\n        * column \"SIFRA_KMRS\" of vector_timeless[\"LPIS_{year}\"] represents an index into geopedia's\n          table \"Crop type classification for Slovenia\" and not the CROP ID, as the name suggests\n        * This task replaces \"SIFRA_KMRS\" with \"SIFKMRS\", which truly represents the CROP ID\n        * CROP IDs are strings and not integers, which represents a problem when burning in the\n          LPIS data to raster.\n        * This task replaces \"204_a\" with \"1204\"\n        * the column is cast to numeric\n    * Austria:\n        * column \"SNAR_BEZEI\" of vector_timeless[\"LPIS_{year}\"] represents an index into geopedia's\n          table \"Austria LPIS (SNAR_BEZEI)\" and not the CROP NAME, as the name suggests\n        * a new column \"SNAR_BEZEI_NAME\" is added with the CROP NAME as it appears in Austrian LPIS data\n    * Denmark:\n        * columns \"CropName\" and \"PreCropName\" of vector_timeless[\"LPIS_{year}\"] represent indices into geopedia's\n          table \"DK LPIS crop type\" and not the CROP NAME, as the names suggest\n        * they are replaced with two new columns \"Crop Name\" and \"PreCrop Name\" with the CROP NAME as it\n          appears in Danish LPIS data\n\n    :param feature: Name of the vector_timeless feature with LPIS data\n    :type feature: str\n    :param country: Name of the country\n    :type country: str\n    \"\"\"\n    def __init__(self, feature, country):\n        self.feature = feature\n        self.country = country\n        self.mapping = None\n\n        self._set_mapping()\n\n    def _set_mapping(self):\n        if self.country == 'Slovenia':\n            self.mapping = get_slovenia_crop_geopedia_idx_to_crop_id_mapping()\n        elif self.country == 'Austria':\n            self.mapping = get_austria_crop_geopedia_idx_to_crop_id_mapping()\n        elif self.country == 'Denmark':\n            self.mapping = get_danish_crop_geopedia_idx_to_crop_id_mapping()\n\n    def _fix_slovenian_lpis(self, eopatch):\n        \"\"\"\n        See Task's docs for the explanation of what is done.\n        \"\"\"\n        eopatch.vector_timeless[self.feature].rename(index=str, columns={\"SIFRA_KMRS\": \"crop_geopedia_idx\"},\n                                                     inplace=True)\n        eopatch.vector_timeless[self.feature] = pd.merge(eopatch.vector_timeless[self.feature],\n                                                         self.mapping,\n                                                         on='crop_geopedia_idx')\n        eopatch.vector_timeless[self.feature].loc[eopatch.vector_timeless[self.feature]['SIFKMRS'] == '204_a',\n                                                  'SIFKMRS'] = '1204'\n        eopatch.vector_timeless[self.feature]['SIFKMRS'] = pd.to_numeric(eopatch.vector_timeless[self.feature]['SIFKMRS'])\n\n\n    def _fix_austrian_lpis(self, eopatch):\n        \"\"\"\n        See Task's docs for the explanation of what is done.\n        \"\"\"\n        eopatch.vector_timeless[self.feature] = pd.merge(eopatch.vector_timeless[self.feature],\n                                                         self.mapping,\n                                                         on='SNAR_BEZEI')\n\n    def _fix_danish_lpis(self, eopatch):\n        \"\"\"\n        See Task's docs for the explanation of what is done.\n        \"\"\"\n        eopatch.vector_timeless[self.feature].rename(index=str, columns={\"CropName\": \"crop_geopedia_idx\"}, inplace=True)\n        eopatch.vector_timeless[self.feature] = pd.merge(eopatch.vector_timeless[self.feature],\n                                                         self.mapping,\n                                                         on='crop_geopedia_idx')\n        eopatch.vector_timeless[self.feature]['crop_geopedia_idx'] = eopatch.vector_timeless[self.feature]['PreCropName']\n        self.mapping.rename(index=str, columns={\"Crop Name\": \"PreCrop Name\"}, inplace=True)\n        eopatch.vector_timeless[self.feature] = pd.merge(eopatch.vector_timeless[self.feature],\n                                                         self.mapping,\n                                                         on='crop_geopedia_idx')\n        eopatch.vector_timeless[self.feature].drop(['crop_geopedia_idx', 'PreCropName'], axis=1, inplace=True)\n\n    def execute(self, eopatch):\n        if self.country == 'Slovenia':\n            self._fix_slovenian_lpis(eopatch)\n        elif self.country == 'Austria':\n            self._fix_austrian_lpis(eopatch)\n        elif self.country == 'Denmark':\n            self._fix_danish_lpis(eopatch)\n\n        return eopatch\n\n\nclass AddGeopediaVectorFeature(EOTask):\n    \"\"\"\n    Add vector data from Geopedia.\n    \"\"\"\n    def __init__(self, feature, layer, year_filter=None, drop_duplicates=False):\n        self.feature_type, self.feature_name = next(self._parse_features(feature)())\n        self.layer = layer\n        self.drop_duplicates = drop_duplicates\n        self.year_col_name = year_filter[0] if year_filter is not None else None\n        self.year = year_filter[1] if year_filter is not None else None\n    \n    def execute(self, eopatch):\n        # convert to 3857 CRS\n        bbox_3857 = transform_bbox(eopatch.bbox, CRS.POP_WEB)\n        \n        # get iterator over features\n        gpd_iter = GeopediaFeatureIterator(layer=self.layer, bbox=bbox_3857)\n\n        features = list(gpd_iter)\n        if len(features):\n            gdf = gpd.GeoDataFrame.from_features(features)\n            gdf.crs = {'init': 'epsg:4326'}\n            # convert back to EOPatch CRS\n            gdf = gdf.to_crs({'init': f'epsg:{eopatch.bbox.crs.value}'})\n\n            if self.year:\n                # Filter by years\n                gdf = gdf.loc[gdf[self.year_col_name].isin([self.year])]\n            \n            if self.drop_duplicates:\n                sel = gdf.drop('geometry', axis=1)\n                sel = sel.drop_duplicates()\n                gdf = gdf.loc[sel.index]\n            \n            eopatch[self.feature_type][self.feature_name] = gdf \n\n        return eopatch\n    \n\nclass AddAreaRatio(EOTask):\n    \"\"\"\n    Calculates the ratio between \n    \n        area of all fields (vector data) / total area of the patch.\n    \n    This information can be used for example to exclude EOPatches with no or very small area of cultivated land.\n    \"\"\"\n    def __init__(self, vector_feature, area_feature):\n        self.in_feature_type, self.in_feature_name = next(self._parse_features(vector_feature)())\n        self.out_feature_type, self.out_feature_name = next(self._parse_features(area_feature)())\n\n    def execute(self, eopatch):\n        ratio = np.array([-1.0])\n        if self.in_feature_name not in eopatch[self.in_feature_type]:\n            eopatch[self.out_feature_type][self.out_feature_name] = ratio\n            return eopatch\n        \n        gdf = eopatch[self.in_feature_type][self.in_feature_name]\n        ratio = np.array([0.0])\n        if gdf is not None:\n            bbox_poly = Polygon(eopatch.bbox.get_polygon())\n            ratio = np.array([np.sum(gdf.area.values) / bbox_poly.area])\n\n        eopatch[self.out_feature_type][self.out_feature_name] = ratio\n        \n        return eopatch\n\n\nclass Sen2CorValidData:\n    \"\"\"\n    Combine Sen2Cor's classification map with `IS_DATA` to define a valid data mask.\n    The valid data mask is post-processed (optional).\n\n    Sen2Cor's classification map is assumed to be found in eopatch.mask['SCL']\n    \"\"\"\n\n    def __init__(self, valid_classes, erosion_radius=0, dilation_radius=0):\n        
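# radii (in pixels) of the disk-shaped structuring elements for the optional erosion/dilation post-processing; 0 skips the step\n        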
self.valid = valid_classes\n        self.erosion = erosion_radius\n        self.dilation = dilation_radius\n\n    def __call__(self, eopatch):\n        sen2cor_valid = np.zeros_like(eopatch.mask['SCL'], dtype=np.bool)\n\n        for valid in self.valid:\n            sen2cor_valid = np.logical_or(sen2cor_valid, (eopatch.mask['SCL'] == valid))\n\n        sen2cor_valid = sen2cor_valid.squeeze()\n        if self.erosion:\n            sen2cor_valid = np.logical_not(\n                np.asarray([binary_erosion(np.logical_not(mask), disk(self.erosion)) for mask in sen2cor_valid],\n                           dtype=np.bool))\n\n        if self.dilation:\n            sen2cor_valid = np.logical_not(\n                np.asarray([binary_dilation(np.logical_not(mask), disk(self.dilation)) for mask in sen2cor_valid],\n                           dtype=np.bool))\n\n        return np.logical_and(eopatch.mask['IS_DATA'].astype(np.bool), sen2cor_valid[..., np.newaxis])\n\n\nclass SentinelHubValidData:\n    \"\"\"\n    Combine s2cloudless cloud map with `IS_DATA` to define a `VALID_DATA_SH` mask\n\n    SentinelHub's cloud mask is assumed to be found in eopatch.mask['CLM']\n    \"\"\"\n\n    def __call__(self, eopatch):\n        return np.logical_and(eopatch.mask['IS_DATA'].astype(np.bool),\n                              np.logical_not(eopatch.mask['CLM'].astype(np.bool)))\n\n\nclass MergeMasks:\n    \"\"\"\n    Merges two specified masks with a logical AND and returns the result.\n    \"\"\"\n\n    def __init__(self, mask_a, mask_b):\n        self.mask_a = mask_a\n        self.mask_b = mask_b\n\n    def __call__(self, eopatch):\n        return np.logical_and(eopatch.mask[self.mask_a].astype(np.bool),\n                              eopatch.mask[self.mask_b].astype(np.bool))\n\n\nclass CountValid(EOTask):\n    \"\"\"\n    The task counts the number of valid observations in the time-series and stores the result in the timeless mask.\n    \"\"\"\n\n    def __init__(self, count_what, feature_name):\n        self.what = count_what\n        self.name = feature_name\n\n    def execute(self, eopatch):\n        eopatch.add_feature(FeatureType.MASK_TIMELESS, self.name, np.count_nonzero(eopatch.mask[self.what], axis=0))\n\n        return eopatch\n","sub_path":"CropData/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":11242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"230925554","text":"from random import randint\nprint(\"...rock...\\n...paper...\\n...scissors...\\n\")\nplayer = input(\"Please enter your move\\n\")\ncomputer = randint(0,2)\nif computer == 0:\n    computer = \"rock\"\nelif computer == 1:\n    computer = \"paper\"\nelse:\n    computer = \"scissors\"\n\nplayer = player.upper() \ncomputer = computer.upper()\nprint(\"computer: \"+ computer)\nif player == computer:\n    print(\"It's a draw!\")\nelif player == \"rock\".upper():\n    if computer == \"paper\".upper():\n        print(\"computer wins!\")\n    else:\n        print(\"player 1 won!\")\nelif player == \"paper\".upper():\n    if computer == \"scissors\".upper():\n        print(\"computer wins!\")\n    else:\n        print(\"player 1 won!\")\nelif player == \"scissors\".upper():\n    if computer == \"rock\".upper():\n        print(\"computer wins!\")\n    else:\n        print(\"player 1 won!\")\nelse:\n    print(\"please enter a valid entry...\")","sub_path":"rockPaperScissors/ver2.py","file_name":"ver2.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"525855846","text":"## 1. Introduction ##\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nbike_sharing = pd.read_csv('day.csv')\nbike_sharing['dteday'] = pd.to_datetime(bike_sharing['dteday'])\n\nplt.scatter(bike_sharing[\"workingday\"], bike_sharing[\"casual\"])\nplt.title(\"Working Day Vs. 
Casual\")\nplt.show()\n\nplt.scatter(bike_sharing[\"workingday\"], bike_sharing[\"registered\"])\nplt.title(\"Working Day Vs. Registered\")\nplt.show()\n\n## 2. Bar Plots ##\n\nimport matplotlib.pyplot as plt\nworking_days = ['Non-Working Day', 'Working Day']\nregistered_avg = [2959, 3978]\n\nplt.bar(working_days, registered_avg)\nplt.show()\n\n## 3. Customizing Bar Plots ##\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nbike_sharing = pd.read_csv('day.csv')\nbike_sharing['dteday'] = pd.to_datetime(bike_sharing['dteday'])\nweekday_averages = bike_sharing.groupby('weekday').mean()[['casual', 'registered']].reset_index() # It's not essential to understand how this code works, we'll cover this in a later course\n\nplt.bar(weekday_averages[\"weekday\"], weekday_averages[\"registered\"])\nplt.xticks(ticks = weekday_averages[\"weekday\"], labels=['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'], rotation=30)\nplt.show()\n\n## 4. Frequency Tables ##\n\nimport matplotlib.pyplot as plt\n\nunique_values = [1, 2, 3, 4]\nweather_2011 = [226, 124, 15, 0]\nweather_2012 = [237, 123, 6, 0]\n\nplt.bar(unique_values, weather_2011)\nplt.xticks(ticks=[1,2,3,4])\nplt.title('Weather Patterns: 2011')\nplt.ylabel('Frequency')\nplt.xlabel('Unique Values')\nplt.show()\n\nplt.bar(unique_values, weather_2012)\nplt.xticks(ticks=[1,2,3,4])\nplt.title('Weather Patterns: 2012')\nplt.ylabel('Frequency')\nplt.xlabel('Unique Values')\nplt.show()\n\n## 5. Grouped Frequency Tables ##\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nbike_sharing = pd.read_csv('day.csv')\nbike_sharing['dteday'] = pd.to_datetime(bike_sharing['dteday'])\n\nregistered_freq = bike_sharing[\"registered\"].value_counts(bins=10).sort_index() \ncasual_freq = bike_sharing[\"casual\"].value_counts(bins=10).sort_index() \n\n## 6. Histograms ##\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nbike_sharing = pd.read_csv('day.csv')\nbike_sharing['dteday'] = pd.to_datetime(bike_sharing['dteday'])\n\nplt.hist(bike_sharing[\"casual\"])\nplt.show()\n\n## 7. The Normal Distribution ##\n\nsentence_1=True\nsentence_2=False\nsentence_3=True\nsentence_4=True\nsentence_5=False\n\n## 8. The Uniform Distribution ##\n\nsentence_1=True\nsentence_2=False\nsentence_3=False\nsentence_4=False","sub_path":"data-visualization-fundamentals/Bar Plots, Histograms, and Distributions-522.py","file_name":"Bar Plots, Histograms, and Distributions-522.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"233202415","text":"#!/usr/bin/env python3\n# Copyright 2016 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport codecs\nimport unittest\nfrom unittest.mock import patch, mock_open, call\n\nimport generate_policy_source\n\nfrom generate_policy_source import PolicyDetails\n\n\nclass CppGenerationTest(unittest.TestCase):\n\n TEMPLATES_JSON = {\n \"risk_tag_definitions\": [{\n \"name\": \"full-admin-access\",\n \"description\": \"full-admin-access-desc\",\n \"user-description\": \"full-admin-access-user-desc\"\n }],\n \"policy_definitions\": [{\n \"name\": \"ExampleStringPolicy\",\n \"type\": \"string\",\n \"schema\": {\n \"type\": \"string\"\n },\n \"supported_on\": [\"chrome_os:1-\"],\n \"id\": 1,\n \"tags\": [],\n \"caption\": \"ExampleStringPolicy caption\",\n \"desc\": \"ExampleStringPolicy desc\"\n }, {\n \"name\": \"ExampleBoolPolicy\",\n \"type\": \"main\",\n \"schema\": {\n \"type\": \"boolean\"\n },\n \"supported_on\": [\"chrome_os:1-\"],\n \"id\": 2,\n \"tags\": [],\n \"caption\": \"ExampleBoolPolicy caption\",\n \"desc\": \"ExampleBoolPolicy desc\",\n }, {\n \"name\": \"ExampleBoolMergeMetapolicy\",\n \"type\": \"main\",\n \"schema\": {\n \"type\": \"boolean\"\n },\n \"supported_on\": [\"chrome_os:1-\"],\n \"features\": {\n \"metapolicy_type\": \"merge\",\n },\n \"id\": 3,\n \"tags\": [],\n \"caption\": \"ExampleBoolMergeMetapolicy caption\",\n \"desc\": \"ExampleBoolMergeMetapolicy desc\",\n }, {\n \"name\": \"ExampleBoolPrecedenceMetapolicy\",\n \"type\": \"main\",\n \"schema\": {\n \"type\": \"boolean\"\n },\n \"supported_on\": [\"chrome_os:1-\"],\n \"features\": {\n \"metapolicy_type\": \"precedence\",\n },\n \"id\": 4,\n \"tags\": [],\n \"caption\": \"ExampleBoolPrecedenceMetapolicy caption\",\n \"desc\": \"ExampleBoolPrecedenceMetapolicy desc\",\n }],\n \"policy_atomic_group_definitions\": []\n }\n\n def setUp(self):\n self.chrome_major_version = 94\n self.target_platform = 'chrome_os'\n self.all_target_platforms = ['win', 'mac', 'linux', 'chromeos', 'fuchsia']\n self.risk_tags = generate_policy_source.RiskTags(self.TEMPLATES_JSON)\n self.policies = [\n generate_policy_source.PolicyDetails(policy, self.chrome_major_version,\n self.target_platform,\n self.risk_tags.GetValidTags())\n for policy in self.TEMPLATES_JSON['policy_definitions']\n ]\n self.risk_tags.ComputeMaxTags(self.policies)\n\n policy_details_set = list(map((lambda x: x.name), self.policies))\n policies_already_in_group = set()\n self.policy_atomic_groups = [\n generate_policy_source.PolicyAtomicGroup(group, policy_details_set,\n policies_already_in_group)\n for group in self.TEMPLATES_JSON['policy_atomic_group_definitions']\n ]\n\n def testDefaultValueGeneration(self):\n \"\"\"Tests generation of default policy values.\"\"\"\n # Bools\n stmts, expr = generate_policy_source._GenerateDefaultValue(True)\n self.assertListEqual([], stmts)\n self.assertEqual('base::Value(true)', expr)\n stmts, expr = generate_policy_source._GenerateDefaultValue(False)\n self.assertListEqual([], stmts)\n self.assertEqual('base::Value(false)', expr)\n\n # Ints\n stmts, expr = generate_policy_source._GenerateDefaultValue(33)\n self.assertListEqual([], stmts)\n self.assertEqual('base::Value(33)', expr)\n\n # Strings\n stmts, expr = generate_policy_source._GenerateDefaultValue('foo')\n self.assertListEqual([], stmts)\n self.assertEqual('base::Value(\"foo\")', expr)\n\n # Empty list\n stmts, expr = generate_policy_source._GenerateDefaultValue([])\n self.assertListEqual(\n ['base::Value 
default_value(base::Value::Type::LIST);'], stmts)\n    self.assertEqual('std::move(default_value)', expr)\n\n    # List with values\n    stmts, expr = generate_policy_source._GenerateDefaultValue([1, '2'])\n    self.assertListEqual([\n        'base::Value default_value(base::Value::Type::LIST);',\n        'default_value.Append(base::Value(1));',\n        'default_value.Append(base::Value(\"2\"));'\n    ], stmts)\n    self.assertEqual('std::move(default_value)', expr)\n\n    # Recursive lists are not supported.\n    stmts, expr = generate_policy_source._GenerateDefaultValue([1, []])\n    self.assertListEqual([], stmts)\n    self.assertIsNone(expr)\n\n    # Arbitrary types are not supported.\n    stmts, expr = generate_policy_source._GenerateDefaultValue(object())\n    self.assertListEqual([], stmts)\n    self.assertIsNone(expr)\n\n  def testWriteCloudPolicyProtobuf(self):\n    is_full_runtime_values = [False, True]\n    output_path = 'mock_cloud_policy_proto'\n    header_write_call = '''\nsyntax = \"proto2\";\n\n{}option optimize_for = LITE_RUNTIME;\n\npackage enterprise_management;\n\nimport \"policy_common_definitions{}.proto\";\n'''\n\n    for is_full_runtime in is_full_runtime_values:\n      with patch('codecs.open', mock_open()) as mocked_file:\n        with codecs.open(output_path, 'w', encoding='utf-8') as f:\n          generate_policy_source._WriteCloudPolicyProtobuf(\n              self.policies,\n              self.policy_atomic_groups,\n              self.target_platform,\n              f,\n              self.risk_tags,\n              is_full_runtime=is_full_runtime)\n\n      full_runtime_comment = '//' if is_full_runtime else ''\n      full_runtime_suffix = '_full_runtime' if is_full_runtime else ''\n\n      with self.subTest(is_full_runtime=is_full_runtime):\n        mocked_file.assert_called_once_with(output_path, 'w', encoding='utf-8')\n        mocked_file().write.assert_has_calls([\n            call(\n                header_write_call.format(full_runtime_comment,\n                                         full_runtime_suffix)),\n            call('message CloudPolicySettings {\\n'),\n            call('  optional StringPolicyProto ExampleStringPolicy = 3;\\n'),\n            call('  optional BooleanPolicyProto ExampleBoolPolicy = 4;\\n'),\n            call('  optional BooleanPolicyProto '\n                 'ExampleBoolMergeMetapolicy = 5;\\n'),\n            call('  optional BooleanPolicyProto '\n                 'ExampleBoolPrecedenceMetapolicy = 6;\\n'),\n            call('}\\n\\n'),\n        ])\n\n  def testWriteChromeSettingsProtobuf(self):\n    is_full_runtime_values = [False, True]\n    output_path = 'mock_chrome_settings_proto'\n    header_write_call = '''\nsyntax = \"proto2\";\n\n{}option optimize_for = LITE_RUNTIME;\n\npackage enterprise_management;\n\n// For StringList and PolicyOptions.\nimport \"policy_common_definitions{}.proto\";\n\n'''\n\n    for is_full_runtime in is_full_runtime_values:\n      with patch('codecs.open', mock_open()) as mocked_file:\n        with codecs.open(output_path, 'w', encoding='utf-8') as f:\n          generate_policy_source._WriteChromeSettingsProtobuf(\n              self.policies,\n              self.policy_atomic_groups,\n              self.target_platform,\n              f,\n              self.risk_tags,\n              is_full_runtime=is_full_runtime)\n\n      full_runtime_comment = '//' if is_full_runtime else ''\n      full_runtime_suffix = '_full_runtime' if is_full_runtime else ''\n\n      with self.subTest(is_full_runtime=is_full_runtime):\n        mocked_file.assert_called_once_with(output_path, 'w', encoding='utf-8')\n        mocked_file().write.assert_has_calls([\n            call(\n                header_write_call.format(full_runtime_comment,\n                                         full_runtime_suffix)),\n            call('// PBs for individual settings.\\n\\n'),\n            call('// ExampleStringPolicy caption'),\n            call('\\n'),\n            call('//'),\n            call('\\n'),\n            call('// ExampleStringPolicy desc'),\n            call('\\n'),\n            call('//'),\n            call('\\n'),\n            call('// Supported on: chrome_os'),\n            call('\\n'),\n            call('message 
ExampleStringPolicyProto {\\n'),\n call(' optional PolicyOptions policy_options = 1;\\n'),\n call(' optional string ExampleStringPolicy = 2;\\n'),\n call('}\\n\\n'),\n call('// ExampleBoolPolicy caption'),\n call('\\n'),\n call('//'),\n call('\\n'),\n call('// ExampleBoolPolicy desc'),\n call('\\n'),\n call('//'),\n call('\\n'),\n call('// Supported on: chrome_os'),\n call('\\n'),\n call('message ExampleBoolPolicyProto {\\n'),\n call(' optional PolicyOptions policy_options = 1;\\n'),\n call(' optional bool ExampleBoolPolicy = 2;\\n'),\n call('}\\n\\n'),\n call('// ExampleBoolMergeMetapolicy caption'),\n call('\\n'),\n call('//'),\n call('\\n'),\n call('// ExampleBoolMergeMetapolicy desc'),\n call('\\n'),\n call('//'),\n call('\\n'),\n call('// Supported on: chrome_os'),\n call('\\n'),\n call('message ExampleBoolMergeMetapolicyProto {\\n'),\n call(' optional PolicyOptions policy_options = 1;\\n'),\n call(' optional bool ExampleBoolMergeMetapolicy = 2;\\n'),\n call('}\\n\\n'),\n call('// ExampleBoolPrecedenceMetapolicy caption'),\n call('\\n'),\n call('//'),\n call('\\n'),\n call('// ExampleBoolPrecedenceMetapolicy desc'),\n call('\\n'),\n call('//'),\n call('\\n'),\n call('// Supported on: chrome_os'),\n call('\\n'),\n call('message ExampleBoolPrecedenceMetapolicyProto {\\n'),\n call(' optional PolicyOptions policy_options = 1;\\n'),\n call(' optional bool ExampleBoolPrecedenceMetapolicy = 2;\\n'),\n call('}\\n\\n'),\n call('''// --------------------------------------------------\n// Big wrapper PB containing the above groups.\n\nmessage ChromeSettingsProto {\n'''),\n call(\n ' optional ExampleStringPolicyProto ExampleStringPolicy = 3;\\n'\n ' optional ExampleBoolPolicyProto ExampleBoolPolicy = 4;\\n'\n ' optional ExampleBoolMergeMetapolicyProto '\n 'ExampleBoolMergeMetapolicy = 5;\\n'\n ' optional ExampleBoolPrecedenceMetapolicyProto '\n 'ExampleBoolPrecedenceMetapolicy = 6;\\n'),\n call('}\\n\\n'),\n ])\n\n def testGetMetapoliciesOfType(self):\n merge_metapolicies = generate_policy_source._GetMetapoliciesOfType(\n self.policies, \"merge\")\n self.assertListEqual([\"ExampleBoolMergeMetapolicy\"], merge_metapolicies)\n self.assertEqual(1, len(merge_metapolicies))\n\n precedence_metapolicies = generate_policy_source._GetMetapoliciesOfType(\n self.policies, \"precedence\")\n self.assertListEqual([\"ExampleBoolPrecedenceMetapolicy\"],\n precedence_metapolicies)\n self.assertEqual(1, len(precedence_metapolicies))\n\n invalid_metapolicies = generate_policy_source._GetMetapoliciesOfType(\n self.policies, \"invalid\")\n self.assertListEqual([], invalid_metapolicies)\n self.assertEqual(0, len(invalid_metapolicies))\n\n def testWritePolicyConstantHeader(self):\n output_path = 'mock_policy_constants_h'\n expected_file_calls_default_first_part = [\n call('''\\\n#ifndef COMPONENTS_POLICY_POLICY_CONSTANTS_H_\n#define COMPONENTS_POLICY_POLICY_CONSTANTS_H_\n\n#include \n#include \n\n#include \"components/policy/core/common/policy_details.h\"\n#include \"components/policy/core/common/policy_map.h\"\n#include \"components/policy/proto/cloud_policy.pb.h\"\n\nnamespace policy {\n\nnamespace internal {\nstruct SchemaData;\n}\n\n''')\n ]\n expected_file_calls_default_win_part = [\n call('''\\\n// The windows registry path where Chrome policy configuration resides.\nextern const wchar_t kRegistryChromePolicyKey[];\n''')\n ]\n expected_file_calls_default_second_part = [\n call('''\\\n#if defined(OS_CHROMEOS)\n// Sets default profile policies values for enterprise users.\nvoid 
SetEnterpriseUsersProfileDefaults(PolicyMap* policy_map);\n// Sets default system-wide policies values for enterprise users.\nvoid SetEnterpriseUsersSystemWideDefaults(PolicyMap* policy_map);\n// Sets all default values for enterprise users.\nvoid SetEnterpriseUsersDefaults(PolicyMap* policy_map);\n#endif\n\n// Returns the PolicyDetails for |policy| if |policy| is a known\n// Chrome policy, otherwise returns nullptr.\nconst PolicyDetails* GetChromePolicyDetails(\nconst std::string& policy);\n\n// Returns the schema data of the Chrome policy schema.\nconst internal::SchemaData* GetChromeSchemaData();\n\n'''),\n call('// Key names for the policy settings.\\nnamespace key {\\n\\n'),\n call('extern const char kExampleStringPolicy[];\\n'),\n call('extern const char kExampleBoolPolicy[];\\n'),\n call('extern const char kExampleBoolMergeMetapolicy[];\\n'),\n call('extern const char kExampleBoolPrecedenceMetapolicy[];\\n'),\n call('\\n} // namespace key\\n\\n'),\n call('// Group names for the policy settings.\\nnamespace group {\\n\\n'),\n call('\\n} // namespace group\\n\\n'),\n call('struct AtomicGroup {\\n'\n ' const short id;\\n'\n ' const char* policy_group;\\n'\n ' const char* const* policies;\\n'\n '};\\n\\n'),\n call('extern const AtomicGroup kPolicyAtomicGroupMappings[];\\n\\n'),\n call('extern const size_t kPolicyAtomicGroupMappingsLength;\\n\\n'),\n call('// Arrays of metapolicies.\\nnamespace metapolicy {\\n\\n'),\n call('extern const char* kMerge[1];\\n'),\n call('extern const char* kPrecedence[1];\\n\\n'),\n call('} // namespace metapolicy\\n\\n'),\n call('enum class StringPolicyType {\\n'\n ' STRING,\\n JSON,\\n EXTERNAL,\\n'\n '};\\n\\n'),\n call('''\\\n// Read access to the protobufs of all supported boolean user policies.\n'''),\n call('struct BooleanPolicyAccess {\\n'),\n call('''\\\n const char* policy_key;\n bool per_profile;\n bool (enterprise_management::CloudPolicySettings::*has_proto)() const;\n const enterprise_management::BooleanPolicyProto&\n (enterprise_management::CloudPolicySettings::*get_proto)() const;\n'''),\n call('};\\n'),\n call('extern const BooleanPolicyAccess kBooleanPolicyAccess[];\\n\\n'),\n call('''\\\n// Read access to the protobufs of all supported integer user policies.\n'''),\n call('struct IntegerPolicyAccess {\\n'),\n call('''\\\n const char* policy_key;\n bool per_profile;\n bool (enterprise_management::CloudPolicySettings::*has_proto)() const;\n const enterprise_management::IntegerPolicyProto&\n (enterprise_management::CloudPolicySettings::*get_proto)() const;\n'''),\n call('};\\n'),\n call('extern const IntegerPolicyAccess kIntegerPolicyAccess[];\\n\\n'),\n call('''\\\n// Read access to the protobufs of all supported string user policies.\n'''),\n call('struct StringPolicyAccess {\\n'),\n call('''\\\n const char* policy_key;\n bool per_profile;\n bool (enterprise_management::CloudPolicySettings::*has_proto)() const;\n const enterprise_management::StringPolicyProto&\n (enterprise_management::CloudPolicySettings::*get_proto)() const;\n'''),\n call(' const StringPolicyType type;\\n'),\n call('};\\n'),\n call('extern const StringPolicyAccess kStringPolicyAccess[];\\n\\n'),\n call('''\\\n// Read access to the protobufs of all supported stringlist user policies.\n'''),\n call('struct StringListPolicyAccess {\\n'),\n call('''\\\n const char* policy_key;\n bool per_profile;\n bool (enterprise_management::CloudPolicySettings::*has_proto)() const;\n const enterprise_management::StringListPolicyProto&\n 
(enterprise_management::CloudPolicySettings::*get_proto)() const;\n'''),\n call('};\\n'),\n call('extern const StringListPolicyAccess '\n 'kStringListPolicyAccess[];\\n\\n'),\n call('constexpr int64_t '\n 'kDevicePolicyExternalDataResourceCacheSize = 0;\\n'),\n call('''\\\n\n} // namespace policy\n\n#endif // COMPONENTS_POLICY_POLICY_CONSTANTS_H_\n''')\n ]\n\n expected_file_calls_default = (expected_file_calls_default_first_part +\n expected_file_calls_default_second_part)\n # Win header has special lines after 'struct SchemaData;' declaration.\n expected_file_calls_win = (expected_file_calls_default_first_part +\n expected_file_calls_default_win_part +\n expected_file_calls_default_second_part)\n\n expected_file_calls = {\n platform: expected_file_calls_default\n for platform in self.all_target_platforms\n }\n expected_file_calls['win'] = expected_file_calls_win\n\n for target_platform in self.all_target_platforms:\n with patch('codecs.open', mock_open()) as mocked_file:\n with codecs.open(output_path, 'w', encoding='utf-8') as f:\n generate_policy_source._WritePolicyConstantHeader(\n self.policies,\n self.policy_atomic_groups,\n target_platform,\n f,\n self.risk_tags,\n )\n with self.subTest(target_platform=target_platform):\n mocked_file.assert_called_once_with(output_path, 'w', encoding='utf-8')\n mocked_file().write.assert_has_calls(\n expected_file_calls[target_platform])\n\n def testWritePolicyConstantSource(self):\n output_path = 'mock_policy_constants_cc'\n\n expected_file_calls_default_first_part = [\n call('''\\\n#include \"components/policy/policy_constants.h\"\n\n#include \n#include \n#include \n\n#include \"base/check_op.h\"\n#include \"base/stl_util.h\" // base::size()\n#include \"base/values.h\"\n#include \"build/branding_buildflags.h\"\n#include \"components/policy/core/common/policy_types.h\"\n#include \"components/policy/core/common/schema_internal.h\"\n#include \"components/policy/proto/cloud_policy.pb.h\"\n#include \"components/policy/risk_tag.h\"\n\nnamespace em = enterprise_management;\n\nnamespace policy {\n\n'''),\n call('''\\\nconst __attribute__((unused)) PolicyDetails kChromePolicyDetails[] = {\n// is_deprecated is_future is_device_policy id max_external_data_size, risk tags\n'''),\n call(' // ExampleStringPolicy\\n'),\n # No actual new lines below, just a split of long line.\n call(' { false, false, false,'\n ' 1, 0, { } },\\n'),\n call(' // ExampleBoolPolicy\\n'),\n call(' { false, false, false,'\n ' 2, 0, { } },\\n'),\n call(' // ExampleBoolMergeMetapolicy\\n'),\n call(' { false, false, false,'\n ' 3, 0, { } },\\n'),\n call(' // ExampleBoolPrecedenceMetapolicy\\n'),\n call(' { false, false, false,'\n ' 4, 0, { } },\\n'),\n call('};\\n\\n'),\n call('''\\\nconst internal::SchemaNode kSchemas[] = {\n// Type Extra IsSensitiveValue HasSensitiveChildren\n'''),\n # No actual new lines below, just a split of long line.\n call(' { base::Value::Type::DICTIONARY, '\n '0, false, false }, // root node\\n'),\n call(' { base::Value::Type::BOOLEAN, '\n '-1, false, false }, // simple type: boolean\\n'),\n call(' { base::Value::Type::STRING, '\n '-1, false, false }, // simple type: string\\n'),\n call('};\\n\\n'),\n call('''\\\nconst internal::PropertyNode kPropertyNodes[] = {\n// Property Schema\n'''),\n # No actual new lines below, just a split of long line.\n call(' { key::kExampleBoolMergeMetapolicy,'\n ' 1 },\\n'),\n call(' { key::kExampleBoolPolicy,'\n ' 1 },\\n'),\n call(' { key::kExampleBoolPrecedenceMetapolicy,'\n ' 1 },\\n'),\n call(' { key::kExampleStringPolicy,'\n 
' 2 },\\n'),\n call('};\\n\\n'),\n call('''\\\nconst internal::PropertiesNode kProperties[] = {\n// Begin End PatternEnd RequiredBegin RequiredEnd Additional Properties\n'''),\n # No actual new lines below, just a split of long line.\n call(' { 0, 4, 4,'\n ' 0, 0, -1 }, // root node\\n'),\n call('};\\n\\n'),\n call('const internal::SchemaData* GetChromeSchemaData() {\\n'),\n call('''\\\n static const internal::SchemaData kChromeSchemaData = {\n kSchemas,\n'''),\n call(' kPropertyNodes,\\n'),\n call(' kProperties,\\n'),\n call(' nullptr,\\n'),\n call(' nullptr,\\n'),\n call(' nullptr,\\n'),\n call(' nullptr,\\n'),\n call(' -1, // validation_schema root index\\n'),\n call(' };\\n\\n'),\n call(' return &kChromeSchemaData;\\n}\\n\\n'),\n call('\\n'),\n call('namespace {\\n'),\n call('''\\\nbool CompareKeys(const internal::PropertyNode& node,\n const std::string& key) {\n return node.key < key;\n}\n\n'''),\n call('} // namespace\\n\\n')\n ]\n\n expected_file_calls_default_win_part = [\n call('''\\\n#if BUILDFLAG(GOOGLE_CHROME_BRANDING)\nconst wchar_t kRegistryChromePolicyKey[] = \\\nL\"SOFTWARE\\\\\\\\Policies\\\\\\\\Google\\\\\\\\Chrome\";\n#else\nconst wchar_t kRegistryChromePolicyKey[] = L\"SOFTWARE\\\\\\\\Policies\\\\\\\\Chromium\";\n#endif\n\n''')\n ]\n\n expected_file_calls_default_second_part = [\n call('#if defined(OS_CHROMEOS)'),\n # Note no \\ and new lines in three calls below.\n call('''\nvoid SetEnterpriseUsersProfileDefaults(PolicyMap* policy_map) {\n\n}\n'''),\n call('''\nvoid SetEnterpriseUsersSystemWideDefaults(PolicyMap* policy_map) {\n\n}\n'''),\n call('''\nvoid SetEnterpriseUsersDefaults(PolicyMap* policy_map) {\n SetEnterpriseUsersProfileDefaults(policy_map);\n SetEnterpriseUsersSystemWideDefaults(policy_map);\n}\n'''),\n call('#endif\\n\\n'),\n call('''\\\nconst PolicyDetails* GetChromePolicyDetails(const std::string& policy) {\n'''),\n call('''\\\n // First index in kPropertyNodes of the Chrome policies.\n static const int begin_index = 0;\n // One-past-the-end of the Chrome policies in kPropertyNodes.\n static const int end_index = 4;\n'''),\n call(\"\"\"\\\n const internal::PropertyNode* begin =\n kPropertyNodes + begin_index;\n const internal::PropertyNode* end = kPropertyNodes + end_index;\n const internal::PropertyNode* it =\n std::lower_bound(begin, end, policy, CompareKeys);\n if (it == end || it->key != policy)\n return nullptr;\n // This relies on kPropertyNodes from begin_index to end_index\n // having exactly the same policies (and in the same order) as\n // kChromePolicyDetails, so that binary searching on the first\n // gets the same results as a binary search on the second would.\n // However, kPropertyNodes has the policy names and\n // kChromePolicyDetails doesn't, so we obtain the index into\n // the second array by searching the first to avoid duplicating\n // the policy name pointers.\n // Offsetting |it| from |begin| here obtains the index we're\n // looking for.\n size_t index = it - begin;\n CHECK_LT(index, base::size(kChromePolicyDetails));\n return kChromePolicyDetails + index;\n\"\"\"),\n call('}\\n\\n'),\n call('namespace key {\\n\\n'),\n call('const char kExampleStringPolicy[] = \"ExampleStringPolicy\";\\n'),\n call('const char kExampleBoolPolicy[] = \"ExampleBoolPolicy\";\\n'),\n call('const char kExampleBoolMergeMetapolicy[] = '\n '\"ExampleBoolMergeMetapolicy\";\\n'),\n call('const char kExampleBoolPrecedenceMetapolicy[] = '\n '\"ExampleBoolPrecedenceMetapolicy\";\\n'),\n call('\\n} // namespace key\\n\\n'),\n call('namespace group 
{\\n\\n'),\n call('\\n'),\n call('namespace {\\n\\n'),\n call('\\n} // namespace\\n'),\n call('\\n} // namespace group\\n\\n'),\n call('const AtomicGroup kPolicyAtomicGroupMappings[] = {\\n'),\n call('};\\n\\n'),\n call('const size_t kPolicyAtomicGroupMappingsLength = 0;\\n\\n'),\n call('namespace metapolicy {\\n\\n'),\n call('const char* kMerge[1] = {\\n'),\n call(' key::kExampleBoolMergeMetapolicy,\\n'),\n call('};\\n\\n'),\n call('const char* kPrecedence[1] = {\\n'),\n call(' key::kExampleBoolPrecedenceMetapolicy,\\n'),\n call('};\\n\\n'),\n call('} // namespace metapolicy\\n\\n'),\n call('const BooleanPolicyAccess kBooleanPolicyAccess[] = {\\n'),\n call('''\\\n {key::kExampleBoolPolicy,\n false,\n &em::CloudPolicySettings::has_exampleboolpolicy,\n &em::CloudPolicySettings::exampleboolpolicy},\n'''),\n call('''\\\n {key::kExampleBoolMergeMetapolicy,\n false,\n &em::CloudPolicySettings::has_exampleboolmergemetapolicy,\n &em::CloudPolicySettings::exampleboolmergemetapolicy},\n'''),\n call('''\\\n {key::kExampleBoolPrecedenceMetapolicy,\n false,\n &em::CloudPolicySettings::has_exampleboolprecedencemetapolicy,\n &em::CloudPolicySettings::exampleboolprecedencemetapolicy},\n'''),\n call(' {nullptr, false, nullptr, nullptr},\\n};\\n\\n'),\n call('const IntegerPolicyAccess kIntegerPolicyAccess[] = {\\n'),\n call(' {nullptr, false, nullptr, nullptr},\\n};\\n\\n'),\n call('const StringPolicyAccess kStringPolicyAccess[] = {\\n'),\n call('''\\\n {key::kExampleStringPolicy,\n false,\n &em::CloudPolicySettings::has_examplestringpolicy,\n &em::CloudPolicySettings::examplestringpolicy,\n StringPolicyType::STRING},\n'''),\n call(' {nullptr, false, nullptr, nullptr},\\n};\\n\\n'),\n call('const StringListPolicyAccess kStringListPolicyAccess[] = {\\n'),\n call(' {nullptr, false, nullptr, nullptr},\\n};\\n\\n'),\n call('\\n} // namespace policy\\n')\n ]\n\n expected_file_calls_default = (expected_file_calls_default_first_part +\n expected_file_calls_default_second_part)\n # Win source has special lines after 'CompareKeys' implementations.\n expected_file_calls_win = (expected_file_calls_default_first_part +\n expected_file_calls_default_win_part +\n expected_file_calls_default_second_part)\n\n expected_file_calls = {\n platform: expected_file_calls_default\n for platform in self.all_target_platforms\n }\n expected_file_calls['win'] = expected_file_calls_win\n\n for target_platform in self.all_target_platforms:\n with patch('codecs.open', mock_open()) as mocked_file:\n with codecs.open(output_path, 'w', encoding='utf-8') as f:\n generate_policy_source._WritePolicyConstantSource(\n self.policies,\n self.policy_atomic_groups,\n target_platform,\n f,\n self.risk_tags,\n )\n with self.subTest(target_platform=target_platform):\n mocked_file.assert_called_once_with(output_path, 'w', encoding='utf-8')\n mocked_file().write.assert_has_calls(\n expected_file_calls[target_platform])\n\n def testWriteChromeOSPolicyConstantsHeader(self):\n output_path = 'mock_policy_constants_h'\n with patch('codecs.open', mock_open()) as mocked_file:\n with codecs.open(output_path, 'w', encoding='utf-8') as f:\n generate_policy_source._WriteChromeOSPolicyConstantsHeader(\n self.policies,\n self.policy_atomic_groups,\n self.target_platform,\n f,\n self.risk_tags,\n )\n with self.subTest():\n mocked_file.assert_called_once_with(output_path, 'w', encoding='utf-8')\n mocked_file().write.assert_has_calls([\n call('''\\\n#ifndef __BINDINGS_POLICY_CONSTANTS_H_\n#define __BINDINGS_POLICY_CONSTANTS_H_\n\n'''),\n call('namespace 
enterprise_management {\\n'\n 'class CloudPolicySettings;\\n'),\n call('class BooleanPolicyProto;\\n'),\n call('class IntegerPolicyProto;\\n'),\n call('class StringPolicyProto;\\n'),\n call('class StringListPolicyProto;\\n'),\n call('} // namespace enterprise_management\\n\\n'),\n call('namespace policy {\\n\\n'),\n call('''\\\n// Registry key names for user and device policies.\nnamespace key {\n\n'''),\n call('extern const char kExampleStringPolicy[];\\n'),\n call('extern const char kExampleBoolPolicy[];\\n'),\n call('extern const char kExampleBoolMergeMetapolicy[];\\n'),\n call('extern const char kExampleBoolPrecedenceMetapolicy[];\\n'),\n call('\\n} // namespace key\\n\\n'),\n call(\n '// NULL-terminated list of device policy registry key names.\\n'),\n call('extern const char* kDevicePolicyKeys[];\\n\\n'),\n call('''\\\n// Access to the mutable protobuf function of all supported boolean user\n// policies.\n'''),\n call('''\\\nstruct BooleanPolicyAccess {\n const char* policy_key;\n bool per_profile;\n enterprise_management::BooleanPolicyProto*\n (enterprise_management::CloudPolicySettings::*mutable_proto_ptr)();\n};\n'''),\n call('extern const BooleanPolicyAccess kBooleanPolicyAccess[];\\n\\n'),\n call('''\\\n// Access to the mutable protobuf function of all supported integer user\n// policies.\n'''),\n call('''\\\nstruct IntegerPolicyAccess {\n const char* policy_key;\n bool per_profile;\n enterprise_management::IntegerPolicyProto*\n (enterprise_management::CloudPolicySettings::*mutable_proto_ptr)();\n};\n'''),\n call('extern const IntegerPolicyAccess kIntegerPolicyAccess[];\\n\\n'),\n call('''\\\n// Access to the mutable protobuf function of all supported string user\n// policies.\n'''),\n call('''\\\nstruct StringPolicyAccess {\n const char* policy_key;\n bool per_profile;\n enterprise_management::StringPolicyProto*\n (enterprise_management::CloudPolicySettings::*mutable_proto_ptr)();\n};\n'''),\n call('extern const StringPolicyAccess kStringPolicyAccess[];\\n\\n'),\n call('''\\\n// Access to the mutable protobuf function of all supported stringlist user\n// policies.\n'''),\n call('''\\\nstruct StringListPolicyAccess {\n const char* policy_key;\n bool per_profile;\n enterprise_management::StringListPolicyProto*\n (enterprise_management::CloudPolicySettings::*mutable_proto_ptr)();\n};\n'''),\n call('''\\\nextern const StringListPolicyAccess kStringListPolicyAccess[];\n\n'''),\n call('''\\\n} // namespace policy\n\n#endif // __BINDINGS_POLICY_CONSTANTS_H_\n''')\n ])\n\n def testWriteChromeOSPolicyConstantsSource(self):\n output_path = 'mock_policy_constants_cc'\n with patch('codecs.open', mock_open()) as mocked_file:\n with codecs.open(output_path, 'w', encoding='utf-8') as f:\n generate_policy_source._WriteChromeOSPolicyConstantsSource(\n self.policies,\n self.policy_atomic_groups,\n self.target_platform,\n f,\n self.risk_tags,\n )\n with self.subTest():\n mocked_file.assert_called_once_with(output_path, 'w', encoding='utf-8')\n mocked_file().write.assert_has_calls([\n call('''\\\n#include \"bindings/cloud_policy.pb.h\"\n#include \"bindings/policy_constants.h\"\n\nnamespace em = enterprise_management;\n\nnamespace policy {\n\n'''),\n call('namespace key {\\n\\n'),\n call('const char kExampleStringPolicy[] = \"ExampleStringPolicy\";\\n'),\n call('const char kExampleBoolPolicy[] = \"ExampleBoolPolicy\";\\n'),\n call('const char kExampleBoolMergeMetapolicy[] = '\n '\"ExampleBoolMergeMetapolicy\";\\n'),\n call('const char kExampleBoolPrecedenceMetapolicy[] = '\n 
'\"ExampleBoolPrecedenceMetapolicy\";\\n'),\n call('\\n} // namespace key\\n\\n'),\n call('const char* kDevicePolicyKeys[] = {\\n\\n'),\n call(' nullptr};\\n\\n'),\n call('constexpr BooleanPolicyAccess kBooleanPolicyAccess[] = {\\n'),\n call('''\\\n {key::kExampleBoolPolicy,\n false,\n &em::CloudPolicySettings::mutable_exampleboolpolicy},\n'''),\n call('''\\\n {key::kExampleBoolMergeMetapolicy,\n false,\n &em::CloudPolicySettings::mutable_exampleboolmergemetapolicy},\n'''),\n call('''\\\n {key::kExampleBoolPrecedenceMetapolicy,\n false,\n &em::CloudPolicySettings::mutable_exampleboolprecedencemetapolicy},\n'''),\n call(' {nullptr, false, nullptr},\\n};\\n\\n'),\n call('constexpr IntegerPolicyAccess kIntegerPolicyAccess[] = {\\n'),\n call(' {nullptr, false, nullptr},\\n};\\n\\n'),\n call('constexpr StringPolicyAccess kStringPolicyAccess[] = {\\n'),\n call('''\\\n {key::kExampleStringPolicy,\n false,\n &em::CloudPolicySettings::mutable_examplestringpolicy},\n'''),\n call(' {nullptr, false, nullptr},\\n};\\n\\n'),\n call(\n 'constexpr StringListPolicyAccess kStringListPolicyAccess[] = {\\n'\n ),\n call(' {nullptr, false, nullptr},\\n};\\n\\n'),\n call('} // namespace policy\\n')\n ])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"components/policy/tools/generate_policy_source_test.py","file_name":"generate_policy_source_test.py","file_ext":"py","file_size_in_byte":33566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"417456166","text":"class Solution:\n\tdef myAtoi(self, str: str) -> int:\n\t\tres, sign = 0, 1\n\t\tgetSign = False\n\t\tfor c in str:\n\t\t\tif c == \" \" and res == 0 and not getSign:\n\t\t\t\tcontinue\n\t\t\telif c == \"-\" and res == 0 and not getSign:\n\t\t\t\tsign = -1\n\t\t\t\tgetSign = True\n\t\t\telif c == \"+\" and res == 0 and not getSign:\n\t\t\t\tsign = 1\n\t\t\t\tgetSign = True\n\t\t\telif c.isdigit():\n\t\t\t\td = int(c)\n\t\t\t\tres = res * 10 + d\n\t\t\t\tgetSign = True\n\t\t\telse:\n\t\t\t\tbreak\n\t\tINT_MAX = pow(2,31) - 1\n\t\tINT_MIN = -pow(2,31)\n\t\tif sign == 1:\n\t\t\tres = res if res <= INT_MAX else INT_MAX\n\t\telse:\n\t\t\tres = res * sign if res * sign >= -pow(2,31) else INT_MIN\n\t\treturn res\n\n\n","sub_path":"string/string-to-integer-atoi.py","file_name":"string-to-integer-atoi.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"547558519","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2012 Smile (). All Rights Reserved\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom openerp.tools.safe_eval import safe_eval as eval\nfrom openerp.tools import float_round\n\nfrom tools import get_date, get_fiscalyear_start_date, get_fiscalyear_stop_date, \\\n    get_period_start_date, get_period_stop_date, get_prorata_temporis, get_prorata_temporis_by_period, \\\n    get_depreciation_period_dates\n\n\nclass DepreciationBoard(object):\n\n    def __init__(self, method_info, purchase_value, annuities, rate=0.0, salvage_value=0.0, depreciation_start_date=None,\n                 sale_date=None, depreciation_period=12, fiscalyear_start_day='01-01', board_stop_date=None, rounding=2,\n                 readonly_values=None, exceptional_values=None):\n        assert depreciation_period in (1, 2, 3, 4, 6, 12), 'depreciation_period must be in (1, 2, 3, 4, 6, 12)'\n        self.depreciation_period = depreciation_period\n        self.method_info = DepreciationBoard.check_and_format_method_info(method_info)\n        self.purchase_value = purchase_value\n        self.salvage_value = method_info['use_salvage_value'] and salvage_value or 0.0\n        self.rate = rate\n        self.depreciation_start_date = get_date(depreciation_start_date, datetime.today())\n        self.sale_date = get_date(sale_date)\n        self.fiscalyear_start_day = fiscalyear_start_day\n        self.rounding = rounding\n        self.readonly_values = DepreciationBoard.check_and_format_vals(readonly_values, 'readonly_values')\n        self.exceptional_values = DepreciationBoard.check_and_format_vals(exceptional_values, 'exceptional_values')\n        self.initial_annuities = annuities\n        self.need_additional_annuity = method_info['need_additional_annuity'] \\\n            and self.depreciation_start_date.strftime('%m-%d') != fiscalyear_start_day\n        self.board_stop_date = get_date(board_stop_date)\n        self.first_yearly_depreciation_date = get_fiscalyear_stop_date(self.depreciation_start_date, self.fiscalyear_start_day)\n        if self.sale_date and self.sale_date < self.first_yearly_depreciation_date:\n            self.first_yearly_depreciation_date = self.sale_date\n        self.reset()\n\n    def reset(self):\n        self.lines = []\n        self.yearly_lines = []\n        self.annuities = self.initial_annuities\n        self.total_annuities = self.board_stop_date.year - self.depreciation_start_date.year + 1 if self.board_stop_date \\\n            else self.initial_annuities + self.need_additional_annuity\n        self.annuity_number = 1\n        fiscalyear_start_date = get_fiscalyear_start_date(self.depreciation_start_date, self.fiscalyear_start_day)\n        exceptional_value_before_depreciation_start_date = sum([self.exceptional_values[month] for month in self.exceptional_values\n                                                                if month < fiscalyear_start_date.strftime('%Y-%m')], 0)\n        self.book_value = self.purchase_value - exceptional_value_before_depreciation_start_date\n        self.book_value_wo_exceptional = self.purchase_value - exceptional_value_before_depreciation_start_date\n        self.base_value = self.purchase_value - self.salvage_value - exceptional_value_before_depreciation_start_date\n        self.accumulated_value = 0.0\n        self.accumulated_exceptional_value = exceptional_value_before_depreciation_start_date\n        self.next_depreciation_date = self.first_yearly_depreciation_date\n        self.reset_partially = False\n\n    @staticmethod\n    def check_and_format_method_info(method_info):\n        if not isinstance(method_info, dict):\n            raise TypeError(\"method_info must be a dictionary\")\n        missing_keys = []\n        for key in ('base_value', 'use_salvage_value', 'use_manual_rate', 'rate_formula', 'prorata', 'need_additional_annuity'):\n            
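# gather every missing mandatory key so they can be reported together\n            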
if key not in method_info:\n missing_keys.append(key)\n if missing_keys:\n raise KeyError(\"The following keys are missing in method_info dict: %s\" % missing_keys)\n return method_info\n\n @staticmethod\n def check_and_format_vals(vals, dict_name):\n error_msg = '%s keys must be strings at format YYYY-MM' % dict_name\n vals = vals or {}\n for k in vals:\n if not isinstance(k, basestring):\n raise ValueError(error_msg)\n else:\n try:\n datetime.strptime(k, '%Y-%m')\n except ValueError:\n raise ValueError(error_msg)\n if dict_name == 'exceptional_values':\n if isinstance(vals[k], (int, long)):\n vals[k] = float(vals[k])\n if not isinstance(vals[k], float):\n raise ValueError('%s values must be floats' % dict_name)\n if dict_name == 'readonly_values':\n error_msg2 = \"%s values must be dictionaries {'depreciation_value': float, 'base_value': float}\" % dict_name\n if not isinstance(vals[k], dict):\n raise ValueError(error_msg2)\n else:\n for k2 in ('depreciation_value', 'base_value'):\n if k2 not in vals[k] or not isinstance(vals[k][k2], float):\n raise ValueError(error_msg2)\n return vals\n\n def compute(self):\n self.reset()\n break_loop = False\n while not break_loop and self.annuity_number <= self.total_annuities and \\\n (not self.sale_date or self.next_depreciation_date <= self.sale_date):\n if self.next_depreciation_date == self.sale_date:\n break_loop = True\n self.yearly_lines.append(self._get_next_yearly_line())\n self.annuity_number += 1\n for yearly_line in self.yearly_lines:\n self.lines.extend(yearly_line.get_periodical_lines(self))\n return self.get_lines()\n\n def _compute_depreciation_rate(self):\n localdict = {'length': float(self.annuities), 'annuity_number': float(self.annuity_number)}\n if self.method_info['use_manual_rate']:\n localdict['rate'] = self.rate\n return eval(self.method_info['rate_formula'], localdict)\n\n def _get_prorata_temporis(self):\n if self.method_info['prorata']:\n if self.annuity_number == 1 and self.next_depreciation_date == self.first_yearly_depreciation_date:\n prorata = get_prorata_temporis(self.depreciation_start_date, self.fiscalyear_start_day, 12)\n if self.sale_date == self.next_depreciation_date:\n prorata += get_prorata_temporis(self.sale_date, self.fiscalyear_start_day, 12, opposite=True) - 1.0\n return prorata\n if self.annuity_number > self.annuities + self.need_additional_annuity:\n return 0.0\n if self.sale_date and self.next_depreciation_date == self.sale_date:\n return get_prorata_temporis(self.sale_date, self.fiscalyear_start_day, 12, opposite=True)\n if self.sale_date and self.next_depreciation_date > self.sale_date: # TODO: check if useful\n return 0.0\n return 1.0\n\n def _compute_depreciation_value(self):\n if self.annuity_number >= self.annuities + self.need_additional_annuity:\n return self.book_value - self.salvage_value if self.book_value else 0.0\n return float_round(self.base_value * self._compute_depreciation_rate() / 100.0 * self._get_prorata_temporis(), precision_digits=self.rounding)\n\n def _get_readonly_value(self):\n depreciation_value, readonly = 0.0, False\n last_year_month = self.next_depreciation_date.strftime('%Y-%m')\n if last_year_month in self.readonly_values:\n readonly = True\n depreciation_value = 0.0\n fiscalyear_start_date = self.next_depreciation_date + relativedelta(years=-1, days=1)\n for month in self.readonly_values:\n if fiscalyear_start_date.strftime('%Y-%m') <= month <= self.next_depreciation_date.strftime('%Y-%m'):\n depreciation_value += self.readonly_values[month]['depreciation_value']\n if 
round(self.readonly_values[last_year_month]['base_value'], self.rounding) != round(self.base_value, self.rounding):\n # INFO: means that method changes occured\n self.base_value = self.readonly_values[last_year_month]['base_value']\n self.reset_partially = True\n return depreciation_value, readonly\n\n def _get_exceptional_value(self):\n exceptional_value = 0.0\n fiscalyear_start_date = self.next_depreciation_date + relativedelta(years=-1, days=1)\n for month in self.exceptional_values:\n if fiscalyear_start_date.strftime('%Y-%m') <= month <= self.next_depreciation_date.strftime('%Y-%m'):\n exceptional_value += self.exceptional_values[month]\n self.reset_partially = True\n return exceptional_value\n\n def _get_next_yearly_line(self):\n depreciation_value, readonly = self._get_readonly_value()\n if not readonly:\n depreciation_value = self._compute_depreciation_value()\n self.accumulated_value += depreciation_value\n exceptional_value = self._get_exceptional_value()\n self.accumulated_exceptional_value += exceptional_value\n self.book_value_wo_exceptional = self.purchase_value - self.accumulated_value\n self.book_value = self.book_value_wo_exceptional - self.accumulated_exceptional_value\n vals = {\n 'depreciation_date': self.next_depreciation_date,\n 'base_value': self.base_value,\n 'depreciation_value': depreciation_value,\n 'accumulated_value': self.accumulated_value,\n 'exceptional_value': exceptional_value,\n 'book_value': self.book_value,\n 'book_value_wo_exceptional': self.book_value_wo_exceptional,\n 'rounding': self.rounding,\n 'readonly': readonly,\n }\n self._compute_next_values()\n return DepreciationBoardLine(**vals)\n\n def _compute_next_values(self):\n self.next_depreciation_date += relativedelta(years=1)\n if self.sale_date and self.next_depreciation_date > self.sale_date:\n self.next_depreciation_date = self.sale_date\n if self.method_info['base_value'] == 'book_value' or self.reset_partially:\n self.base_value = self.book_value - self.salvage_value\n if self.reset_partially:\n self.annuities -= self.annuity_number\n self.total_annuities -= self.annuity_number + self.need_additional_annuity\n self.annuity_number = 0\n self.need_additional_annuity = False\n self.reset_partially = False\n\n def get_lines(self):\n return self.lines\n\n def pprint(self):\n from pprint import pprint\n return pprint(self.get_lines())\n\n\nclass DepreciationBoardLine(object):\n\n def __init__(self, depreciation_date, base_value, depreciation_value, accumulated_value, book_value,\n exceptional_value=0.0, book_value_wo_exceptional=0.0, readonly=False, rounding=2, **optional_args):\n self.depreciation_date = depreciation_date\n self.base_value = float_round(base_value, precision_digits=rounding)\n self.depreciation_value = float_round(depreciation_value, precision_digits=rounding)\n self.accumulated_value = float_round(accumulated_value, precision_digits=rounding)\n self.book_value = float_round(book_value, precision_digits=rounding)\n self.exceptional_value = float_round(exceptional_value, precision_digits=rounding)\n self.book_value_wo_exceptional = float_round(book_value_wo_exceptional or book_value, precision_digits=rounding)\n self.readonly = readonly\n self.current_year_accumulated_value = float_round(optional_args.get('current_year_accumulated_value',\n self.depreciation_value + self.exceptional_value),\n precision_digits=rounding)\n self.previous_years_accumulated_value = float_round(optional_args.get('previous_years_accumulated_value',\n self.accumulated_value - self.depreciation_value +\n 
self.book_value_wo_exceptional - self.book_value -\n self.exceptional_value),\n precision_digits=rounding)\n\n def __repr__(self):\n return repr(self.__dict__)\n\n def __str__(self):\n return str(self.__dict__)\n\n def _get_period_value(self, board, values, depreciation_date):\n period_value, exists = 0.0, False\n period_start_date = get_period_start_date(depreciation_date, board.fiscalyear_start_day, board.depreciation_period)\n if period_start_date < board.depreciation_start_date:\n period_start_date = board.depreciation_start_date\n for month in values:\n if period_start_date.strftime('%Y-%m') <= month <= depreciation_date.strftime('%Y-%m'):\n period_value += values[month]['depreciation_value'] if isinstance(values[month], dict) else values[month]\n exists = True\n return period_value, exists\n\n def _get_readonly_value(self, board, depreciation_date):\n return self._get_period_value(board, board.readonly_values, depreciation_date)\n\n def _get_exceptional_value(self, board, depreciation_date):\n value, exists = self._get_period_value(board, board.exceptional_values, depreciation_date)\n return value\n\n def get_periodical_lines(self, board):\n # TODO: improve me\n if board.depreciation_period == 12:\n return [self]\n period_depreciation_start_date = get_period_start_date(self.depreciation_date, board.fiscalyear_start_day, 12)\n if period_depreciation_start_date < board.depreciation_start_date:\n period_depreciation_start_date = board.depreciation_start_date\n period_depreciation_stop_date = self.depreciation_date\n if board.board_stop_date and board.board_stop_date and period_depreciation_stop_date > board.board_stop_date:\n period_depreciation_stop_date = get_period_stop_date(board.board_stop_date, board.fiscalyear_start_day, board.depreciation_period)\n prorata_temporis_by_period = get_prorata_temporis_by_period(period_depreciation_start_date, period_depreciation_stop_date,\n board.fiscalyear_start_day, board.depreciation_period)\n if not prorata_temporis_by_period:\n return []\n if board.method_info['need_additional_annuity'] and board.board_stop_date and period_depreciation_stop_date >= board.board_stop_date:\n real_end_date = period_depreciation_stop_date + relativedelta(days=1) \\\n + relativedelta(month=board.depreciation_start_date.month, day=board.depreciation_start_date.day) \\\n - relativedelta(days=1)\n period_end_date = get_period_stop_date(real_end_date, board.fiscalyear_start_day, board.depreciation_period)\n period_dates = get_depreciation_period_dates(period_end_date, board.fiscalyear_start_day, board.depreciation_period)\n if real_end_date in period_dates:\n prorata_temporis_by_period[period_depreciation_stop_date] = 1.0\n else:\n prorata_temporis_by_period[period_depreciation_stop_date] = get_prorata_temporis(real_end_date + relativedelta(days=1),\n board.fiscalyear_start_day,\n board.depreciation_period, opposite=True)\n lines = []\n total = sum(prorata_temporis_by_period.values())\n previous_accumulated_value = accumulated_value = self.accumulated_value - self.depreciation_value\n book_value_wo_exceptional = self.book_value_wo_exceptional + self.depreciation_value\n book_value = self.book_value_wo_exceptional + self.exceptional_value\n exceptional_value = gap = accumulated_value_in_period = 0.0\n depreciation_number = len(prorata_temporis_by_period)\n for depreciation_index, depreciation_date in enumerate(sorted(prorata_temporis_by_period)):\n readonly_depreciation_value, readonly = self._get_readonly_value(board, depreciation_date)\n depreciation_value = 
float_round(self.depreciation_value * prorata_temporis_by_period[depreciation_date] / total,\n                                          precision_digits=board.rounding)\n            if readonly:\n                gap += depreciation_value - readonly_depreciation_value\n                depreciation_value = readonly_depreciation_value\n            elif gap:\n                depreciation_value += gap\n                gap = 0.0\n            if depreciation_index + 1 == depreciation_number:\n                depreciation_value = self.depreciation_value - accumulated_value_in_period\n            else:\n                accumulated_value_in_period += depreciation_value\n            accumulated_value += depreciation_value\n            exceptional_value = self._get_exceptional_value(board, depreciation_date)\n            book_value_wo_exceptional -= depreciation_value\n            book_value = book_value_wo_exceptional - exceptional_value\n            vals = {\n                'depreciation_date': depreciation_date,\n                'base_value': self.base_value,\n                'depreciation_value': depreciation_value,\n                'previous_years_accumulated_value': previous_accumulated_value,\n                'current_year_accumulated_value': accumulated_value - previous_accumulated_value,\n                'accumulated_value': accumulated_value,\n                'exceptional_value': exceptional_value,\n                'book_value': book_value,\n                'book_value_wo_exceptional': book_value_wo_exceptional,\n                'rounding': board.rounding,\n                'readonly': readonly,\n            }\n            lines.append(DepreciationBoardLine(**vals))\n        return lines\n","sub_path":"smile_account_asset/depreciation_board.py","file_name":"depreciation_board.py","file_ext":"py","file_size_in_byte":19408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"238418999","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os,sys,inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\n\nfrom cnb import cnb\nfrom em import emlearn\nfrom plearn import plearn\nfrom genere import genere\nfrom genere import genere_missing as gm\nfrom genere import variable_cachee as vc\nimport pyAgrum as gum\nimport matplotlib.pyplot as plt\n\n\ncsvpath = parentdir + '/csv/'\nbifpath = parentdir + '/bif/'\n\n# Compare the parameters of two Bayesian networks (mean squared difference\n# over all CPT entries)\ndef compareParams(nomBIF1,nomBIF2):\n\n\tbn1 = gum.BayesNet()\n\tbn1.loadBIF(nomBIF1)\n\n\tbn2 = gum.BayesNet()\n\tbn2.loadBIF(nomBIF2)\n\n\tsomme = 0\n\tfor nodeid in range(bn1.size()):\n\t\tp1=bn1.cpt(nodeid)\n\t\ti1=gum.Instantiation(p1)\n\t\ti1.setFirst()\n\t\tp2=bn2.cpt(nodeid)\n\t\twhile (not i1.end()):\n\t\t\tsomme += pow((p1.get(i1)-p2.get(i1)),2)\n\t\t\ti1.inc()\n\n\treturn (1.0/bn1.size()) * somme\n\n# Evaluate the performance of the learning methods\n\ndef evalLearning(N,methode = 0):\n\t\"\"\"\n\tN: number of rows in the generated data sets\n\tmethode: selects which learning method is evaluated\n\t\"\"\"\n\n\t# methode = 0: plearn\n\t# methode = 1: EM without a hidden variable\n\t# methode = 2: EM with a hidden variable\n\t\n\tif methode == 0:\n\t\t\n\t\tres = []\n\t\n\t\tfor I in range(1,N+100,100):\n\t\t\tprint (I)\n\t\t\tgenere.genererCSV(I,csvpath+\"perf.csv\",bifpath+\"bn.bif\")\n\t\t\tplearn.learn(bifpath+\"empty_bn.bif\",csvpath+\"perf.csv\",bifpath+'perf1.bif')\n\t\t\tres.append(compareParams(bifpath+'perf1.bif',bifpath+'bn.bif'))\n\t\t\n\t\tplt.xlabel('Number of rows in the database')\n\t\tplt.ylabel('Difference')\n\t\tplt.plot(range(1,N+100,100),res)\n\t\tplt.show()\n\t\t\n\telif methode == 1:\n\t\tres = []\n\t\tpourcs = [0.0,3.0,5.0,10.0,20.0,50.0]\n\t\t\n\t\tfor pourc in pourcs:\n\t\t\tprint (\"Percentage of missing values\",pourc)\n\t\t\tgm.genererCSV(N,csvpath+\"perf.csv\",bifpath+\"bn.bif\",pourc)\n\t\t\temlearn.learn(bifpath+\"empty_bn.bif\",csvpath+\"perf.csv\",bifpath+'perf1.bif',30)\n\t\t\tres.append(compareParams(bifpath+'perf1.bif',bifpath+'bn.bif'))\n\t\t\n\t\tplt.xlabel('Percentage of missing values')\n\t\tplt.ylabel('Difference')\n\t\tplt.plot(pourcs,res)\n\t\tplt.show()\n\t\t\n\telif methode == 2:\n\t\tres = []\n\t\tpourcs = [0.0,3.0,5.0,10.0,20.0,50.0]\n\t\tfor pourc in pourcs:\n\t\t\tprint (\"Percentage of missing values\",pourc)\n\t\t\t\n\t\t\tvc.genererCSV(6,N,csvpath+'perf.csv',bifpath+'varC.bif',pourc)\n\t\t\temlearn.learn(bifpath+\"empty_varC.bif\",csvpath+\"perf.csv\",bifpath+'perf1.bif',10)\n\t\t\t\n\t\t\tres.append(compareParams(bifpath+'perf1.bif',bifpath+'varC.bif'))\n\t\t\t\n\t\tplt.xlabel('Percentage of missing values')\n\t\tplt.ylabel('Difference')\n\t\tplt.plot(pourcs,res)\n\t\tplt.show()\n","sub_path":"MADI_Ducamp_Taillé/MADI_Ducamp_Taillé/Sources et ressources/perf/perf.py","file_name":"perf.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"578018983","text":"# A Python script to load image data and prepare it for training.\n# It will read data from the train_data folder.\n# The ground truth values are contained in the ground_truth.csv file. \n\nfrom keras.preprocessing.image import ImageDataGenerator\nimport pandas as pd\nimport numpy as np\nfrom joblib import delayed, Parallel\nimport psutil\n\n# A class for loading data\nclass data_loader():\n\n    def __init__(self, train_directory, val_directory, test_directory, data_size):\n        self.train_directory = train_directory\n        self.val_directory = val_directory\n        self.test_directory = test_directory \n\n    \n    def load_images(self):\n        train_datagen = ImageDataGenerator()#width_shift_range=0.2,height_shift_range=0.2,zoom_range=0.2,fill_mode='nearest' )\n        # Need to test with class_mode = \"input\"\n        train_generator = train_datagen.flow_from_directory(self.train_directory, \n                                            target_size=(128, 128),\n                                            color_mode=\"grayscale\",\n                                            class_mode=\"categorical\",\n                                            batch_size=128,\n                                            shuffle =True,\n                                            seed=42, classes=['centred', 'hexagonal', 'noise', 'oblique','rectangular','square'])\n        \n        val_datagen = ImageDataGenerator()\n        val_generator = val_datagen.flow_from_directory(self.val_directory,\n                                            target_size=(128,128),\n                                            color_mode=\"grayscale\",\n                                            class_mode='categorical',\n                                            shuffle =True,seed=42,\n                                            batch_size=128,classes=['centred', 'hexagonal', 'noise', 'oblique','rectangular','square'])\n        \n        \n        test_datagen = ImageDataGenerator()\n        test_generator = test_datagen.flow_from_directory(self.test_directory, \n                                            target_size=(128, 128),\n                                            color_mode=\"grayscale\",\n                                            class_mode=\"categorical\",\n                                            batch_size=128,classes=['centred', 'hexagonal', 'noise', 'oblique','rectangular','square'])\n        \n        \n\n        \n\n        return train_generator, val_generator, test_generator\n","sub_path":"dataLoader.py","file_name":"dataLoader.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"446776414","text":"from tower_defense.tower_bullet import *\nfrom tower_defense.game_object import *\nfrom tower_defense.game_object import GameObject\nfrom frame_counter import *\nfrom physics.box_collider import BoxCollider\nimport tower_defense.game_object\nfrom physics.tower_circle import *\nfrom renderers.animation import Animation\nfrom img_animation import *\nimport 
pygame\nclass Tower(GameObject):\n    def __init__(self,x,y):\n        GameObject.__init__(self,x,y-20)\n        self.frame_counter = FrameCounter(30)\n        self.shoot_lock = False\n        self.box_collider = BoxCollider(64,80)\n        # self.tower_circle = Tower_circle(0,0)\n        self.rangeCheck = False\n        self.range = 0\n        self.enemy_x = tower_defense.game_object.get_position_from_main[0]\n        self.enemy_y = tower_defense.game_object.get_position_from_main[1]\n        self.renderer = Animation(list_tower,loop=True,frame_delay=30)\n\n    def update(self):\n        GameObject.update(self)\n        self.check()\n\n    def shoot(self,e_x,e_y):\n        # print(tower_defense.game_object.hasEnemy)\n        if tower_defense.game_object.hasEnemy:\n            if not self.shoot_lock:\n                bullet = TowerBullet(self.x,self.y,e_x,e_y)\n                add(bullet)\n                # bullet = TowerBullet()\n                self.shoot_lock = True\n\n        self.frame_counter.run()\n        if self.shoot_lock:\n            if self.frame_counter.flag:\n                self.shoot_lock = False\n                self.frame_counter.reset()\n\n    def check(self):\n        # Fire at the first active enemy (or boss) found inside the tower's radius.\n        for game_object in game_objects:\n            if ((type(game_object) == Enemy or type(game_object) == Boss) and game_object.is_active):\n                self.range = ((self.x - game_object.x)**2 + (self.y - game_object.y)**2)**0.5\n                if self.range <= radius:\n                    self.shoot(game_object.x,game_object.y)\n                    self.rangeCheck = True\n                    break\n                else:\n                    self.rangeCheck = False\n    \n\n\n","sub_path":"tower_defense/tower.py","file_name":"tower.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"286027898","text":"import sys\r\nfrom PyQt5.QtWidgets import QApplication, QWidget\r\n#\r\n# What if __name__ == '__main__': does\r\n#\r\n# A Python file is normally used in two ways:\r\n# first, executed directly as a script;\r\n# second, imported into another Python script and called there (module reuse).\r\n# So if __name__ == '__main__': controls which code runs in each case:\r\n# the code under if __name__ == '__main__': only runs in the first case,\r\n# and is not executed when the file is imported by another script.\r\n#\r\n# How if __name__ == '__main__': works\r\n#\r\n# Every Python module (a .py file, such as test.py and import_test.py here)\r\n# contains the built-in variable __name__. When the module is executed\r\n# directly, __name__ equals the file name (with the .py suffix); when the\r\n# module is imported into another module, its __name__ equals the module\r\n# name (without the .py suffix).\r\n#\r\n# \"__main__\", however, always refers to the name of the module currently\r\n# being executed, so when a module is run directly,\r\n# __name__ == '__main__' evaluates to True.\r\n# print(__name__)\r\n#\r\nif __name__ == '__main__':\r\n    app = QApplication(sys.argv)  # create an instance of QApplication\r\n    # sys.argv holds the command-line arguments used to run the Python file,\r\n    # stored as a list; sys.argv[0] is the name of the current module\r\n    windows = QWidget()  # create a window\r\n    windows.resize(1000, 500)  # set the window size\r\n    windows.move(300, 300)  # move the window\r\n    windows.setWindowTitle('App bases on PyQt5')  # set the window title\r\n    windows.show()  # show the window\r\n\r\n    sys.exit(app.exec_())  # enter the main loop; exit() ensures it ends safely\r\n    # app.exec_() is a QApplication method: it \"enters the program's main\r\n    # loop until exit() is called\". Without it the window would flash and\r\n    # close immediately, so show() alone is not enough.\r\n    # Using app.exec_() without sys.exit() also runs fine, but the process\r\n    # does not exit after the window is closed; printing the result of\r\n    # app.exec_() returns 0.\r\n    #\r\n    # See also: Python os._exit() vs sys.exit()\r\n    # https://blog.csdn.net/RedPintings/article/details/81562235\r\n\r\n","sub_path":"RunPyQt5/E01Basic/BasicDemo.py","file_name":"BasicDemo.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"199475883","text":"\"\"\"pycln/utils/refactor.py tests.\"\"\"\n# pylint: disable=R0201,W0613\nimport ast\nfrom pathlib import Path\n\nimport pytest\nfrom libcst import ParserSyntaxError\nfrom pytest_mock import mock\n\nfrom pycln.utils import config, refactor, report\nfrom 
pycln.utils._exceptions import (\n ReadPermissionError,\n UnexpandableImportStar,\n UnparsableFile,\n UnsupportedCase,\n WritePermissionError,\n)\nfrom pycln.utils._nodes import Import, ImportFrom, NodeLocation\nfrom pycln.utils.scan import HasSideEffects, ImportStats, SourceStats\n\nfrom .utils import sysu\n\n# Constants.\nMOCK = \"pycln.utils.refactor.%s\"\n\n\nclass TestRefactor:\n\n \"\"\"`Refactor` methods test case.\"\"\"\n\n def setup_method(self, method):\n self.configs = config.Config(paths=[Path(\"\")])\n self.reporter = report.Report(self.configs)\n self.session_maker = refactor.Refactor(self.configs, self.reporter)\n\n @pytest.mark.parametrize(\n \"source_lines, expec_lines\",\n [\n pytest.param(\n [\n \"try:\\n\",\n \" pass\\n\",\n \"except:\\n\",\n \" import y\\n\",\n ],\n [\n \"try:\\n\",\n \" pass\\n\",\n \"except:\\n\",\n \" import y\\n\",\n ],\n id=\"useful\",\n ),\n pytest.param(\n [\n \"try:\\n\",\n \" import x\\n\",\n \" pass\\n\",\n \"except:\\n\",\n \" import y\\n\",\n ],\n [\n \"try:\\n\",\n \" import x\\n\",\n \"except:\\n\",\n \" import y\\n\",\n ],\n id=\"single-useless\",\n ),\n pytest.param(\n [\n \"try:\\n\",\n \" import x\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \"except:\\n\",\n \" import y\\n\",\n ],\n [\n \"try:\\n\",\n \" import x\\n\",\n \"except:\\n\",\n \" import y\\n\",\n ],\n id=\"multi-useless0\",\n ),\n pytest.param(\n [\n \"try:\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \" pass\\n\",\n \"except:\\n\",\n \" import y\\n\",\n ],\n [\n \"try:\\n\",\n \" pass\\n\",\n \"except:\\n\",\n \" import y\\n\",\n ],\n id=\"multi-useless1\",\n ),\n pytest.param(\n [\n \"def foo():\\n\",\n \" '''docs'''\\n\",\n \" pass\\n\",\n ],\n [\n \"def foo():\\n\",\n \" '''docs'''\\n\",\n ],\n id=\"useless with docs\",\n ),\n pytest.param(\n [\n \"x = i if i else y\\n\",\n ],\n [\n \"x = i if i else y\\n\",\n ],\n id=\"TypeError\",\n ),\n ],\n )\n def test_remove_useless_passes(self, source_lines, expec_lines):\n fixed_code = refactor.Refactor.remove_useless_passes(source_lines)\n assert fixed_code == expec_lines\n\n @pytest.mark.parametrize(\n \"safe_read_raise, _code_session_raise\",\n [\n pytest.param(None, None, id=\"without errors\"),\n pytest.param(\n ReadPermissionError(13, \"\", Path(\"\")), None, id=\"ReadPermissionError\"\n ),\n pytest.param(\n WritePermissionError(13, \"\", Path(\"\")), None, id=\"WritePermissionError\"\n ),\n pytest.param(\n None, UnparsableFile(Path(\"\"), SyntaxError(\"\")), id=\"UnparsableFile\"\n ),\n ],\n )\n @mock.patch(MOCK % \"Refactor._output\")\n @mock.patch(MOCK % \"Refactor._code_session\")\n @mock.patch(MOCK % \"iou.safe_read\")\n def test_session(\n self, safe_read, _code_session, _output, safe_read_raise, _code_session_raise\n ):\n safe_read.return_value = (\"code...\\ncode...\\n\", \"utf-8\", \"\\n\")\n safe_read.side_effect = safe_read_raise\n _code_session.return_value = \"code...\\ncode...\\n\"\n _code_session.side_effect = _code_session_raise\n self.session_maker.session(Path(\"modified\"))\n assert self.session_maker._path == Path(\"\")\n\n @pytest.mark.parametrize(\n \"skip_file_return, _analyze_return, expec_fixed_code\",\n [\n pytest.param(True, None, \"original.code\", id=\"file skip\"),\n pytest.param(False, None, \"original.code\", id=\"no stats\"),\n pytest.param(False, (\"s\", \"i\"), \"fixed.code\", id=\"refactored\"),\n ],\n )\n @mock.patch(MOCK % 
\"Refactor._refactor\")\n @mock.patch(MOCK % \"Refactor._analyze\")\n @mock.patch(MOCK % \"scan.parse_ast\")\n @mock.patch(MOCK % \"regexu.skip_file\")\n def test_code_session(\n self,\n skip_file,\n parse_ast,\n _analyze,\n _refactor,\n skip_file_return,\n _analyze_return,\n expec_fixed_code,\n ):\n skip_file.return_value = skip_file_return\n _analyze.return_value = _analyze_return\n _refactor.return_value = \"fixed.code\"\n with sysu.std_redirect(sysu.STD.ERR):\n fixed_code = self.session_maker._code_session(\"original.code\")\n assert fixed_code == expec_fixed_code\n\n @pytest.mark.parametrize(\n \"fixed_lines, original_lines, mode, expec_output\",\n [\n pytest.param(\n [\"code..\\n\"], [\"code..\\n\"], \"verbose\", \"looks good!\", id=\"unchanged\"\n ),\n pytest.param(\n [\"fixed.code..\\n\"],\n [\"original.code..\\n\"],\n \"check\",\n \"🚀\",\n id=\"changed-check\",\n ),\n pytest.param(\n [\"import x\\n\"],\n [\"import x, y\\n\"],\n \"diff\",\n \"-import x, y\\n+import x\\n\",\n id=\"changed-diff\",\n ),\n pytest.param(\n [\"import x\\n\"],\n [\"import x, y\\n\"],\n \"diff\",\n \"-import x, y\\n+import x\\n\",\n id=\"changed-diff\",\n ),\n ],\n )\n @mock.patch(MOCK % \"Refactor.remove_useless_passes\")\n def test_output(self, x, fixed_lines, original_lines, mode, expec_output):\n x.return_value = fixed_lines\n setattr(self.configs, mode, True)\n with sysu.std_redirect(sysu.STD.OUT) as stdout:\n self.session_maker._output(fixed_lines, original_lines, \"utf-8\", \"\\n\")\n assert expec_output in stdout.getvalue()\n\n @mock.patch(MOCK % \"Refactor.remove_useless_passes\")\n def test_output_write(self, x):\n fixed_lines, original_lines = [\"import x\\n\"], [\"import x, y\\n\"]\n x.return_value = fixed_lines\n with sysu.reopenable_temp_file(\"\".join(original_lines)) as tmp_path:\n with open(tmp_path) as tmp:\n self.session_maker._path = tmp_path\n self.session_maker._output(fixed_lines, original_lines, \"utf-8\", \"\\n\")\n assert tmp.readlines() == fixed_lines\n\n @pytest.mark.parametrize(\n \"get_stats_raise, expec_val\",\n [\n pytest.param(None, (\"\", \"\"), id=\"normal\"),\n pytest.param(Exception(\"\"), None, id=\"error\"),\n ],\n )\n @mock.patch(MOCK % \"scan.SourceAnalyzer.get_stats\")\n def test_analyze(self, get_stats, get_stats_raise, expec_val):\n get_stats.return_value = (\"\", \"\")\n get_stats.side_effect = get_stats_raise\n with sysu.std_redirect(sysu.STD.ERR):\n val = self.session_maker._analyze(ast.parse(\"\"), [\"\"])\n assert val == expec_val\n\n @pytest.mark.parametrize(\n (\n \"skip_import_return, _expand_import_star_return, _get_used_names_return,\"\n \"_transform_return, expand_stars, mode, original_lines, expec_fixed_lines\"\n ),\n [\n pytest.param(\n True,\n None,\n None,\n [\"import x # nopycln: import\"],\n False,\n \"not-matter\",\n [\"import x, y # nopycln: import\"],\n [\"import x, y # nopycln: import\"],\n id=\"nopycln\",\n ),\n pytest.param(\n False,\n (None, None),\n None,\n [\"import x, y\"],\n False,\n \"not-matter\",\n [\"import *\"],\n [\"import *\"],\n id=\"unexpandable star\",\n ),\n pytest.param(\n False,\n (None, True),\n {\"x\", \"y\"},\n [\"import x, y\"],\n False,\n \"not-matter\",\n [\"import *\"],\n [\"import *\"],\n id=\"star, used, no -x\",\n ),\n pytest.param(\n False,\n (\n Import(NodeLocation((1, 0), 1), [ast.alias(name=\"x\", asname=None)]),\n True,\n ),\n {\"x\", \"y\"},\n [\"import x, y\"],\n True,\n \"not-matter\",\n [\"import *\"],\n [\"import x, y\"],\n id=\"star, used, -x\",\n ),\n pytest.param(\n False,\n (\n 
Import(NodeLocation((1, 0), 1), [ast.alias(name=\"x\", asname=None)]),\n True,\n ),\n set(),\n [\"\"],\n None,\n \"not-matter\",\n [\"import *\"],\n [\"\"],\n id=\"star, not used\",\n ),\n pytest.param(\n False,\n (\n Import(NodeLocation((1, 0), 1), [ast.alias(name=\"x\", asname=None)]),\n False,\n ),\n set(\"x\"),\n None,\n False,\n \"not-matter\",\n [\"import x\"],\n [\"import x\"],\n id=\"all used, no -x\",\n ),\n pytest.param(\n False,\n (\n Import(NodeLocation((1, 0), 1), [ast.alias(name=\"x\", asname=None)]),\n True,\n ),\n set(\"x\"),\n [\"import x, y\"],\n True,\n \"not-matter\",\n [\"import x\"],\n [\"import x, y\"],\n id=\"all used, -x\",\n ),\n pytest.param(\n False,\n (\n Import(NodeLocation((1, 0), 1), [ast.alias(name=\"x\", asname=None)]),\n False,\n ),\n set(\"x\"),\n [\"import x\"],\n True,\n \"check\",\n [\"import x, y\"],\n [\"import x, y\\n_CHANGED_\"],\n id=\"check\",\n ),\n ],\n )\n @mock.patch(MOCK % \"Refactor._transform\")\n @mock.patch(MOCK % \"Refactor._get_used_names\")\n @mock.patch(MOCK % \"Refactor._expand_import_star\")\n @mock.patch(MOCK % \"regexu.skip_import\")\n def test_refactor(\n self,\n skip_import,\n _expand_import_star,\n _get_used_names,\n _transform,\n skip_import_return,\n _expand_import_star_return,\n _get_used_names_return,\n _transform_return,\n expand_stars,\n mode,\n original_lines,\n expec_fixed_lines,\n ):\n skip_import.return_value = skip_import_return\n _expand_import_star.return_value = _expand_import_star_return\n _get_used_names.return_value = _get_used_names_return\n _transform.return_value = _transform_return\n setattr(self.configs, \"expand_stars\", expand_stars)\n setattr(self.configs, mode, True)\n node = Import(NodeLocation((1, 0), 1), [ast.alias(name=\"x\", asname=None)])\n self.session_maker._import_stats = ImportStats({node}, set())\n with sysu.std_redirect(sysu.STD.OUT):\n with sysu.std_redirect(sysu.STD.ERR):\n fixed_code = self.session_maker._refactor(original_lines)\n assert fixed_code == \"\".join(expec_fixed_lines)\n\n @pytest.mark.parametrize(\n \"_should_remove_return, node, is_star, expec_names\",\n [\n pytest.param(\n False,\n Import(\n NodeLocation((1, 0), 1),\n [\n ast.alias(name=\"x\", asname=None),\n ast.alias(name=\"y\", asname=None),\n ],\n ),\n False,\n {\"x\", \"y\"},\n id=\"used\",\n ),\n pytest.param(\n True,\n Import(\n NodeLocation((1, 0), 1),\n [\n ast.alias(name=\"x\", asname=None),\n ast.alias(name=\"y\", asname=None),\n ],\n ),\n False,\n set(),\n id=\"not-used\",\n ),\n pytest.param(\n True,\n Import(\n NodeLocation((1, 0), 1),\n [\n ast.alias(name=\"x\", asname=None),\n ast.alias(name=\"y\", asname=None),\n ],\n ),\n True,\n set(),\n id=\"not-used, star\",\n ),\n ],\n )\n @mock.patch(MOCK % \"Refactor._should_remove\")\n def test_get_used_names(\n self, _should_remove, _should_remove_return, node, is_star, expec_names\n ):\n _should_remove.return_value = _should_remove_return\n with sysu.std_redirect(sysu.STD.OUT):\n used_names = self.session_maker._get_used_names(node, is_star)\n assert used_names == expec_names\n\n @pytest.mark.parametrize(\n (\n \"rebuild_import_return, rebuild_import_raise, \"\n \"location, original_lines, updated_lines\"\n ),\n [\n pytest.param(\n \"import x\\n\",\n None,\n NodeLocation((1, 0), 1),\n [\"import x, i\\n\", \"import y\\n\"],\n [\"import x\\n\", \"import y\\n\"],\n id=\"normal\",\n ),\n pytest.param(\n \"import x\\n\",\n UnsupportedCase(Path(\"\"), NodeLocation((1, 0), 1), \"\"),\n NodeLocation((1, 0), 1),\n [\"import x, i\\n\", \"import y\\n\"],\n [\"import 
x\\n\", \"import y\\n\"],\n id=\"UnparsableFile\",\n ),\n pytest.param(\n \"import x\\n\",\n ParserSyntaxError(\"\", lines=[\"\"], raw_line=1, raw_column=0),\n NodeLocation((1, 0), 1),\n [\"import x; import y\\n\"],\n [\"import x; import y\\n\"],\n id=\"libcst.ParserSyntaxError\",\n ),\n ],\n )\n @mock.patch(MOCK % \"Refactor._insert\")\n @mock.patch(MOCK % \"transform.rebuild_import\")\n def test_transform(\n self,\n rebuild_import,\n _insert,\n rebuild_import_return,\n rebuild_import_raise,\n location,\n original_lines,\n updated_lines,\n ):\n rebuild_import.side_effect = rebuild_import_raise\n rebuild_import.return_value = rebuild_import_return\n _insert.return_value = updated_lines\n with sysu.std_redirect(sysu.STD.ERR):\n fixed_lines = self.session_maker._transform(\n location, set(), original_lines, updated_lines\n )\n assert fixed_lines == updated_lines\n\n @pytest.mark.parametrize(\n \"expand_import_star_raise, name, expec_is_star\",\n [\n pytest.param(None, \"*\", True, id=\"star\"),\n pytest.param(None, \"!*\", False, id=\"not-star\"),\n pytest.param(\n UnexpandableImportStar(Path(\"\"), NodeLocation((1, 0), 1), \"\"),\n \"*\",\n None,\n id=\"not-star\",\n ),\n ],\n )\n @mock.patch(MOCK % \"scan.expand_import_star\")\n def test_expand_import_star(\n self,\n expand_import_star,\n expand_import_star_raise,\n name,\n expec_is_star,\n ):\n node = ImportFrom(NodeLocation((1, 0), 1), [ast.alias(name=name)], \"xxx\", 0)\n expand_import_star.return_value = node\n expand_import_star.side_effect = expand_import_star_raise\n enode, is_star = self.session_maker._expand_import_star(node)\n assert (enode, is_star) == (node, expec_is_star)\n\n @pytest.mark.parametrize(\n \"_has_used_return, name, asname, expec_val\",\n [\n pytest.param(True, \"os.path.join\", None, True, id=\"used\"),\n pytest.param(False, \"os.path.join\", None, False, id=\"unused\"),\n pytest.param(None, \"os.path.join\", \"asname\", False, id=\"as alias\"),\n pytest.param(None, \"os\", None, False, id=\"single name\"),\n ],\n )\n @mock.patch(MOCK % \"Refactor._has_used\")\n def test_is_partially_used(\n self, _has_used, _has_used_return, name, asname, expec_val\n ):\n _has_used.return_value = _has_used_return\n alias = ast.alias(name=name, asname=asname)\n val = self.session_maker._is_partially_used(alias, False)\n assert val == expec_val\n\n @pytest.mark.parametrize(\n \"_has_used_return, _has_side_effects_return, all_, name, expec_val\",\n [\n pytest.param(True, None, None, \"not-matter\", False, id=\"used\"),\n pytest.param(False, None, None, \"this\", False, id=\"known side effects\"),\n pytest.param(False, None, True, \"not-matter\", True, id=\"--all option\"),\n pytest.param(False, None, False, \"os\", True, id=\"standard lib\"),\n pytest.param(\n False,\n HasSideEffects.NO,\n False,\n \"not-matter\",\n True,\n id=\"no side-effects\",\n ),\n pytest.param(\n False,\n HasSideEffects.YES,\n False,\n \"not-matter\",\n False,\n id=\"no all\",\n ),\n ],\n )\n @mock.patch(MOCK % \"Refactor._has_side_effects\")\n @mock.patch(MOCK % \"Refactor._has_used\")\n def test_should_remove(\n self,\n _has_used,\n _has_side_effects,\n _has_used_return,\n _has_side_effects_return,\n all_,\n name,\n expec_val,\n ):\n _has_used.return_value = _has_used_return\n _has_side_effects.return_value = _has_side_effects_return\n setattr(self.configs, \"all_\", all_)\n alias = ast.alias(name=name, asname=None)\n node = Import(NodeLocation((1, 0), 1), [alias])\n val = self.session_maker._should_remove(node, alias, False)\n assert val == expec_val\n\n 
@pytest.mark.parametrize(\n \"name, is_star, expec_val\",\n [\n pytest.param(\"x\", False, True, id=\"used name\"),\n pytest.param(\"y\", False, False, id=\"not-used name\"),\n pytest.param(\"x.i\", False, True, id=\"used attr\"),\n pytest.param(\"x.j\", False, False, id=\"not-used attr\"),\n pytest.param(\"x\", True, True, id=\"used name, star\"),\n pytest.param(\"__future__\", True, False, id=\"skip name, star\"),\n ],\n )\n def test_has_used(self, name, is_star, expec_val):\n self.session_maker._source_stats = SourceStats({\"x\"}, {\"i\"}, \"__future__\")\n val = self.session_maker._has_used(name, is_star)\n assert val == expec_val\n\n @pytest.mark.parametrize(\n (\n \"get_import_return, safe_read_return, safe_read_raise,\"\n \"parse_ast_return, parse_ast_raise,\"\n \"has_side_effects_return, has_side_effects_raise,\"\n ),\n [\n pytest.param(\n None,\n (\"\", \"\", \"\"),\n None,\n None,\n None,\n HasSideEffects.NOT_MODULE,\n None,\n id=\"no module path\",\n ),\n pytest.param(\n Path(\"\"),\n (\"\", \"\", \"\"),\n ReadPermissionError(13, \"\", Path(\"\")),\n None,\n None,\n HasSideEffects.NOT_KNOWN,\n None,\n id=\"no read permission\",\n ),\n pytest.param(\n Path(\"\"),\n (\"\", \"\", \"\"),\n None,\n ast.Module(),\n UnparsableFile(Path(\"\"), SyntaxError(\"\")),\n HasSideEffects.NOT_KNOWN,\n None,\n id=\"Unparsable File\",\n ),\n pytest.param(\n Path(\"\"),\n (\"\", \"\", \"\"),\n None,\n ast.Module(),\n None,\n HasSideEffects.NOT_KNOWN,\n Exception(\"err\"),\n id=\"generic err\",\n ),\n pytest.param(\n Path(\"\"),\n (\"\", \"\", \"\"),\n None,\n ast.Module(),\n None,\n HasSideEffects.YES,\n None,\n id=\"success\",\n ),\n ],\n )\n @mock.patch(MOCK % \"scan.SideEffectsAnalyzer.has_side_effects\")\n @mock.patch(MOCK % \"scan.SideEffectsAnalyzer.__init__\")\n @mock.patch(MOCK % \"scan.SideEffectsAnalyzer.visit\")\n @mock.patch(MOCK % \"scan.parse_ast\")\n @mock.patch(MOCK % \"iou.safe_read\")\n @mock.patch(MOCK % \"pathu.get_import_path\")\n @mock.patch(MOCK % \"pathu.get_import_from_path\")\n def test_has_side_effects(\n self,\n get_import_from_path,\n get_import_path,\n safe_read,\n parse_ast,\n visit,\n init,\n has_side_effects,\n get_import_return,\n safe_read_return,\n safe_read_raise,\n parse_ast_return,\n parse_ast_raise,\n has_side_effects_return,\n has_side_effects_raise,\n ):\n init.return_value = None\n get_import_from_path.return_value = get_import_return\n get_import_path.return_value = get_import_return\n safe_read.return_value = safe_read_return\n safe_read.side_effect = safe_read_raise\n parse_ast.return_value = parse_ast_return\n parse_ast.side_effect = parse_ast_raise\n has_side_effects.return_value = has_side_effects_return\n has_side_effects.side_effect = has_side_effects_raise\n with sysu.std_redirect(sysu.STD.ERR):\n node = Import(NodeLocation((1, 0), 1), [])\n val = self.session_maker._has_side_effects(\"\", node)\n assert val == has_side_effects_return\n\n @pytest.mark.parametrize(\n \"rebuilt_import, updated_lines, location, expec_updated_lines\",\n [\n pytest.param(\n [\"import x\\n\"],\n [\n \"import z\\n\",\n \"import x, i\\n\",\n \"import y\\n\",\n ],\n NodeLocation((2, 0), 2),\n [\n \"import z\\n\",\n \"import x\\n\",\n \"import y\\n\",\n ],\n id=\"single:replace\",\n ),\n pytest.param(\n \"\",\n [\n \"import z\\n\",\n \"import x, i\\n\",\n \"import y\\n\",\n ],\n NodeLocation((2, 0), 2),\n [\n \"import z\\n\",\n \"\",\n \"import y\\n\",\n ],\n id=\"single:remove\",\n ),\n pytest.param(\n [\n \"from xxx import (\\n\",\n \" x\\n\",\n \")\\n\",\n ],\n [\n 
\"import z\\n\",\n \"from xxx import (\\n\",\n \" x, y\\n\",\n \")\\n\",\n \"import y\\n\",\n ],\n NodeLocation((2, 0), 4),\n [\n \"import z\\n\",\n \"from xxx import (\\n\",\n \" x\\n\",\n \")\\n\",\n \"import y\\n\",\n ],\n id=\"multi:replace\",\n ),\n pytest.param(\n [\n \"from xxx import (\\n\",\n \" x\\n\",\n \")\\n\",\n ],\n [\n \"import z\\n\",\n \"from xxx import (\\n\",\n \" x,\\n\",\n \" y\\n\",\n \")\\n\",\n \"import y\\n\",\n ],\n NodeLocation((2, 0), 5),\n [\n \"import z\\n\",\n \"from xxx import (\\n\",\n \" x\\n\",\n \")\\n\",\n \"\",\n \"import y\\n\",\n ],\n id=\"multi:remove:part\",\n ),\n pytest.param(\n [\"\"],\n [\n \"import z\\n\",\n \"from xxx import (\\n\",\n \" x,\\n\",\n \" y\\n\",\n \")\\n\",\n \"import y\\n\",\n ],\n NodeLocation((2, 0), 5),\n [\n \"import z\\n\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"import y\\n\",\n ],\n id=\"multi:remove:all\",\n ),\n pytest.param(\n [\n \"from xxx import (\\n\",\n \" x,\\n\",\n \" y\\n\",\n \")\\n\",\n ],\n [\n \"import z\\n\",\n \"from xxx import *\\n\",\n \"import y\\n\",\n ],\n NodeLocation((2, 0), 2),\n [\n \"import z\\n\",\n \"from xxx import (\\n x,\\n y\\n)\\n\",\n \"import y\\n\",\n ],\n id=\"multi:add\",\n ),\n ],\n )\n def test_insert(self, rebuilt_import, updated_lines, location, expec_updated_lines):\n fixed = refactor.Refactor._insert(rebuilt_import, updated_lines, location)\n print(repr(fixed))\n assert fixed == expec_updated_lines\n","sub_path":"tests/test_refactor.py","file_name":"test_refactor.py","file_ext":"py","file_size_in_byte":27382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"299834359","text":"import nltk\nimport pickle\nfrom collections import Counter\nfrom helper import Vocabulary\n\n# This code is a Python script that use for mapping from text into numerical \n# index which specify for visual storytelling (VIST) task. The input of this \n# script is JSON formatted file downloadded from \n# http://visionandlanguage.net/VIST/dataset.html.\n# To download the dataset, we already provide a script downloader as follow:\n# https://github.com/systems-ai-lab/visualstorytelling-codebase/blob/master/script/download_dataset.sh\n\n# If you use the download_dataset.sh script for downloading the dataset or \n# directly download from the VIST website, we will get two kind of text \n# annotation such as \"SIS\" and \"DII\". This script is intended to build the\n# vovabulary from SIS text annotation.\n\n# Some part of this code is referenced to https://github.com/tkim-snu/GLACNet \n# work. 
\n\nclass GenerateVocabulary(object):\n # This class will be initialized by the dataset class if the vocabulary file is not exist\n def __init__(self, sis_data_object, minimum_treshold, output_vocabulary):\n self.sis_data_object = sis_data_object\n self.minimum_treshold = minimum_treshold\n self.output_vocabulary = output_vocabulary\n self.generate(self.sis_data_object, self.minimum_treshold, self.output_vocabulary)\n\n def generate(self, sis_data_object, minimum_treshold, output_vocabulary):\n vist = sis_data_object\n counter = Counter()\n ids = vist['stories'].keys()\n\n for i, id in enumerate(ids):\n story = vist['stories'][id]\n for annotation in story:\n caption = annotation['text']\n tokens = []\n try:\n tokens = nltk.tokenize.word_tokenize(caption.lower())\n except Exception:\n pass\n counter.update(tokens)\n\n if i % 1000 == 0:\n print(\"[%d/%d] Tokenized the story captions.\" %(i, len(ids)))\n\n words = [word for word, cnt in counter.items() if cnt >= minimum_treshold]\n\n vocab = Vocabulary()\n vocab.add_word('')\n vocab.add_word('')\n vocab.add_word('')\n vocab.add_word('')\n\n for i, word in enumerate(words):\n vocab.add_word(word)\n \n with open(output_vocabulary, 'wb') as f:\n pickle.dump(vocab, f)\n \n print(\"Total vocabulary size: %d\" %len(vocab))\n print(\"Saved the vocabulary wrapper to '%s'\" %output_vocabulary)\n\n return vocab","sub_path":"app/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"592898688","text":"from datetime import datetime, timedelta, date\nfrom logging import getLogger\n\nfrom influxdb import DataFrameClient\nfrom influxdb.exceptions import InfluxDBClientError\n\nfrom v1.cfg import cfg\nfrom v1.common.utils import TRACE\nfrom v1.common.utils import fix_period, fix\nfrom v1.core.constant import SerialType, X\n\nlogger = getLogger(__name__)\n\n\nclass DataDB:\n _db_name = cfg.get(\"data\", \"influx.name\")\n _db_server = cfg.get(\"data\", \"influx.server\")\n _db_port = cfg.getint(\"data\", \"influx.port\")\n _db_user = cfg.get(\"data\", \"influx.user\")\n _db_pass = cfg.get(\"data\", \"influx.password\")\n _db_pool_size = cfg.getint(\"data\", \"influx.pool_size\")\n\n _protocol = 'json'\n\n def __init__(self):\n self.self_client = None\n\n self.connect()\n\n def connect(self):\n \"\"\"\n 连接序列数据库\n \"\"\"\n self.self_client = DataFrameClient(DataDB._db_server, DataDB._db_port, DataDB._db_user, DataDB._db_pass,\n DataDB._db_name, pool_size=DataDB._db_pool_size)\n self.self_client.create_database(DataDB._db_name)\n\n return self\n\n def save_with_nan(self, ticker, dataframe, columns, tags=None, serial_type=SerialType.D1) -> int:\n \"\"\"\n 保存Ticker的分析数据,运行NaN值\n :param ticker:\n :param dataframe:\n :param columns:\n :param tags:\n :param serial_type:\n :return:\n \"\"\"\n if len(dataframe) == 0:\n return 0\n\n logger.log(TRACE, \"DB-->保存%s数据(允许NaN值)\", ticker)\n\n if tags is not None:\n if 'code' not in tags:\n tags.append('code')\n else:\n tags = ['code']\n\n # 批量保存非Nan列\n if len(tags) > 1:\n self.save(ticker, dataframe[tags], serial_type, silence=True)\n\n # 分配保存Nan列\n for col in columns:\n if col in tags:\n continue\n else:\n logger.log(TRACE, \"DB-->保存%s数据%s列\", ticker, col)\n self.save(ticker, dataframe[['code', col]].dropna(), serial_type, silence=True)\n\n return len(dataframe)\n\n def save(self, ticker, dataframe, serial_type=SerialType.D1, silence=False) -> int:\n \"\"\"\n 保存Ticker分析数据\n :param ticker:\n 
:param dataframe:\n :param serial_type:\n :param silence:\n :return:\n \"\"\"\n if len(dataframe) == 0:\n return 0\n\n if not silence:\n logger.log(TRACE, \"DB-->保存%s数据\", ticker)\n\n try:\n self.self_client.write_points(dataframe, serial_type.name,\n tag_columns=['code'], protocol=DataDB._protocol)\n except Exception as ex:\n logger.info(\"数据保存失败\", repr(ex))\n return 0\n\n return len(dataframe)\n\n def is_market(self, day: date):\n logger.log(TRACE, \"DB-->获取%s是否市场日期\", day.strftime(\"%Y-%m-%d\"))\n try:\n db_name = 'D1'\n sql = \"select count(date) from D1 where time = '\" + day.strftime(X.DATE_FORMAT_YMD_) + \"'\"\n\n rs = self.self_client.query(sql)\n if len(rs) == 0:\n return False\n\n data = rs[db_name].values\n\n count = data[0][0]\n return count > 0\n except InfluxDBClientError as ex:\n logger.info(\"发生异常:%s\", repr(ex))\n return None\n\n def period(self):\n logger.log(TRACE, \"DB-->获取数据起始日期\")\n try:\n db_name = 'D1'\n sql_start = \"select first(date) from D1 where time < '1999-01-01' limit 1\"\n sql_end = \"select last(date) from D1 where time > '2018-01-01' limit 1\"\n\n rs_start = self.self_client.query(sql_start)\n d1 = rs_start[db_name].values\n rs_end = self.self_client.query(sql_end)\n d2 = rs_end[db_name].values\n\n data = (d1[0][0], d2[0][0])\n return [datetime.strptime(s, '%Y-%m-%d').date() for s in data]\n except InfluxDBClientError as ex:\n logger.info(\"发生异常:%s\", repr(ex))\n return None\n\n def calendar(self, ticker):\n \"\"\"\n 获取一个Ticker所有已经本地存储的日期\n :param ticker:\n :return:\n \"\"\"\n logger.log(TRACE, \"DB-->获取%s日历\", ticker)\n try:\n db_name = 'D1'\n sql = 'select \"time\",\"date\" from \"' \\\n + db_name + '\" where \"code\"=\\'' \\\n + ticker + '\\''\n\n rs = self.self_client.query(sql)\n data = rs[db_name].values\n\n if data is None:\n raise ValueError(\"数据异常\")\n\n data = data[:, 0]\n return [datetime.strptime(s, '%Y-%m-%d').date() for s in data.tolist()]\n except InfluxDBClientError as ex:\n logger.info(\"发生异常:%s\", repr(ex))\n return None\n except KeyError as ex:\n logger.info(\"%s数据不存在\", ticker)\n return None\n\n def fetch(self, ticker, start, end, serial_type=SerialType.D1):\n \"\"\"\n 获得一个Ticker时间段的分析K线信息\n :param ticker:\n :param start:\n :param end:\n :param serial_type:\n :return:\n \"\"\"\n if start > end:\n logger.info(\"参数不对,start[%s]不能大于end[%s]\", start, end)\n return None\n\n start, end = fix_period(start, end, serial_type)\n end_str = end.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n start_str = start.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n logger.log(TRACE, \"DB-->获取%s指定时间内数据:%s -> %s\", ticker, start_str, end_str)\n\n name = serial_type.name\n sql = 'select * from \"' + name \\\n + '\" where \"time\" < \\'' \\\n + end_str \\\n + '\\' and \"time\" >= \\'' \\\n + start_str + '\\' and \"code\"=\\'' \\\n + ticker + '\\''\n\n rs = self.self_client.query(sql)\n return rs[name]\n\n def last_date(self, ticker, serial_type=SerialType.D1):\n \"\"\"\n 获取数据库存放的指定标的的最后时间\n :param ticker:\n :param serial_type:\n :return:\n \"\"\"\n logger.log(TRACE, \"DB-->获取%s最后交易日期\", ticker)\n try:\n name = serial_type.name\n sql = 'select last(\"date\"),\"code\" from \"' \\\n + name + '\" where \"code\"=\\'' \\\n + ticker + '\\''\n\n rs = self.self_client.query(sql)\n data = rs[name].index.to_pydatetime()\n\n return fix(data[0])\n except InfluxDBClientError as ex:\n logger.info(\"发生异常:%s\", repr(ex))\n return None\n except KeyError as ex:\n logger.debug(\"%s数据新增\", ticker)\n return None\n\n def first_not_index_date(self, ticker, serial_type=SerialType.D1):\n \"\"\"\n 
获取未计算过指标的第一条数据\n ticker存在库中的数据包含基本的K线数据和包含分析的指标的数据\n 此函数返回第一个尚未分析过指标的日期\n :param serial_type:\n :param ticker:\n :return:\n \"\"\"\n logger.log(TRACE, \"DB-->获取%s未计算过指标的起始日期\", ticker)\n try:\n name = serial_type.name\n sql = 'select \"MACD\" from \"' \\\n + name + '\" where \"code\"=\\'' \\\n + ticker + '\\'' \\\n + 'order by time desc limit 1'\n\n rs = self.self_client.query(sql)\n data = rs[name].index.to_pydatetime()\n\n last_day = fix(data[0])\n return last_day + timedelta(days=1)\n except InfluxDBClientError as ex:\n logger.info(\"发生异常:%s\", repr(ex))\n return None\n except KeyError as ex:\n logger.debug(\"不存在%s指标数据\", ticker)\n return None\n\n def fetch_one_day(self, day):\n \"\"\"\n 获得一天的所有Ticker的行情和指标\n :return:\n \"\"\"\n\n day = fix(day, fix_time=True)\n end = day + timedelta(days=1)\n\n end_str = end.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n start_str = day.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n logger.log(TRACE, \"DB-->获取[%s,%s]的所有Ticker数据\", start_str, end_str)\n\n name = SerialType.D1.name\n sql = 'select * from \"' + name \\\n + '\" where \"time\" >= \\'' \\\n + start_str \\\n + '\\' and \"time\" <= \\'' \\\n + end_str + '\\''\n\n rs = self.self_client.query(sql)\n return rs[name]\n","sub_path":"dfhc-py/v1/data/db/influxdb.py","file_name":"influxdb.py","file_ext":"py","file_size_in_byte":8698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"298600894","text":"from doubly_linked_list import DoublyLinkedList\n\n# return the middle node of the DLL, if there are two nodes, return the left one\n# no empty list, length >= 1\n# we don't know the length\n# in a single pass\n# not sorted\n# 1 - 2 - 3 : 2\n# 1 - 2 - 3 - 4 : 2\ndef find_middle(dll):\n head = dll.head\n tail = dll.tail\n\n while head != tail and head.next != tail:\n head = head.next\n tail = tail.prev\n\n return head.value\n\nodd_nums = DoublyLinkedList()\n[odd_nums.add_to_head(i) for i in [5, 3, 4, 10, 7]]\nprint(find_middle(odd_nums))\n\neven_nums = DoublyLinkedList()\n[even_nums.add_to_tail(i) for i in [5, 3, 4, '10', 7, 8]]\nprint(find_middle(even_nums))\n\nlist_1 = DoublyLinkedList()\nlist_1.add_to_head(10)\nprint(find_middle(list_1))\n\nlist_2 = DoublyLinkedList()\nlist_2.add_to_head(11)\nlist_2.add_to_head(13)\nprint(find_middle(list_2))\n","sub_path":"doubly_linked_list/find_middle.py","file_name":"find_middle.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"187826743","text":"import unittest\r\nfrom city_functions import city_country\r\n\r\nclass CitiesTestCase(unittest.TestCase):\r\n \"\"\"Test for city_country.py\"\"\"\r\n def test_city_country(self):\r\n formatted_name=city_country('Santiago','Chile')\r\n self.assertEqual(formatted_name,'Santiago,Chile')\r\n\r\n def test_city_country_population(self):\r\n formatted_name=city_country('Santiago','Chile',population=100)\r\n self.assertEqual(formatted_name,'Santiago,Chile-population 100')\r\n\r\nunittest.main()\r\n","sub_path":"New folder/test_cities.py","file_name":"test_cities.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"103895109","text":"#!/usr/bin/python\nfrom . import private\nfrom . import line_data\nfrom . import model\nfrom . 
import rundir_num\n\nMOOG_path = '{}/.pymoog/moog_nosm/moog_nosm_NOV2019/'.format(private.os.environ['HOME'])\n# self.rundir_path = '{}/.pymoog/rundir/'.format(private.os.environ['HOME'])\nMOOG_file_path = '{}/.pymoog/files/'.format(private.os.environ['HOME'])\n\nclass blends(rundir_num.rundir_num):\n def __init__(self, teff, logg, m_h, start_wav, end_wav, EW, ele, vmicro=2, mass=1, line_list='ges', prefix=''):\n '''\n Initiate a abfind Instance and read the parameters.\n \n Parameters\n ----------\n teff : int\n The effective temperature of the model\n logg : float\n logg value of the model\n m_h : float\n [M/H] value (overall metallicity) of the model\n start_wav : float\n The start wavelength of the blended feature.\n end_wav : float\n The end wavelength of the blended feature.\n EW : float\n The measured equivalent width.\n ele : float\n The element index of the dominant line in the feature, e.g., Fe I -> 26.0.\n vmicro : float, default 2\n The microturbulance velocity of the model. \n mass : float, default 1\n The stellar mass of the input model. Only used when the model type is MARCS spherical.\n line_list : str, default 'ges'\n The name of the linelist file. If not specified will use built-in VALD linelist.\n prefix : str, default ''.\n The prefix to be added to the name of rundir. Convenient when you want to find a specified rundir if there are many.\n '''\n super(blends, self).__init__('{}/.pymoog/'.format(private.os.environ['HOME']), 'blends', prefix=prefix)\n self.teff = teff\n self.logg = logg\n self.m_h = m_h\n self.vmicro = vmicro\n self.mass = mass\n self.start_wav = start_wav\n self.end_wav = end_wav\n self.EW = EW\n self.ele = ele\n self.line_list = line_list\n \n def prepare_file(self, model_file=None, model_format='moog', loggf_cut=None, abun_change=None, atmosphere=1, lines=1, molecules=1, molecules_include=None, model_type='marcs', model_chem='st', model_geo='auto'):\n '''\n Prepare the model, linelist and control files for MOOG.\n Can either provide stellar parameters and wavelengths or provide file names.\n If fine name(s) provided, the files will be copied to working directory for calculation. \n \n Parameters\n ----------\n model_file : str, optional\n The name of the model file. If not specified, the code will use internal model.\n model_format : str, optional\n The type of the INPUT model file. Default is \"moog\" (then no conversion of format will be done); can be \"moog\", \"kurucz-atlas9\", \"kurucz-atlas12\" or \"marcs\". Should left as it is when not providing the input model file. \n loggf_cut : float, optional\n The cut in loggf; if specified will only include the lines with loggf >= loggf_cut.\n abun_change : dict of pairs {int:float, ...}\n Abundance change, have to be a dict of pairs of atomic number and [X/Fe] values.\n atmosphere : int, default 1\n The atmosphere value described in MOOG documention, section III.\n lines : int, default 1\n The lines value described in MOOG documention, section III.\n molecules : int, default 1\n The molecules value described in MOOG documention, section III.\n molecules_include : list, default None\n Molecules to be included to molecular calculation. Follows the MOOG notation.\n smooth_para : None or list, default None\n The smoothing parameter list of the synthetic spectra.\n model_type : str, default marcs\n The type of internal atmosphere model. Must be kurucz or marcs.\n model_chem : str, default st\n The chemical composition of marcs model. Only valid when model_type is marcs. 
\n model_geo : str, default auto\n The geometry of MARCS model, either 's' for spherical, 'p' for plane-parallel or 'auto'.\n '''\n \n if model_file == None:\n # Model file is not specified, will download Kurucz model according to stellar parameters.\n model.interpolate_model(self.teff, self.logg, self.m_h, vmicro=self.vmicro, mass=self.mass, abun_change=abun_change, molecules_include=molecules_include, save_name=self.rundir_path + 'model.mod', model_type=model_type, chem=model_chem, geo=model_geo)\n self.model_file = 'model.mod'\n else:\n # Model file is specified; record model file name and copy to working directory.\n if model_format == 'moog':\n private.subprocess.run(['cp', model_file, self.rundir_path], encoding='UTF-8', stdout=private.subprocess.PIPE)\n self.model_file = model_file.split('/')[-1]\n elif model_format[:6] == 'kurucz':\n model.kurucz2moog(model_path=model_file, abun_change=abun_change, model_format=model_format[7:], molecules_include=molecules_include, converted_model_path=self.rundir_path + 'model.mod')\n self.model_file = 'model.mod'\n elif model_format == 'marcs':\n marcs_model = model.read_marcs_model(model_file)\n model.marcs2moog(marcs_model, self.rundir_path + 'model.mod', abun_change=abun_change, molecules_include=molecules_include)\n self.model_file = 'model.mod'\n else:\n raise ValueError(\"The input model_type is not supported. Have to be either 'moog', 'kurucz' or 'marcs.\")\n\n\n if self.line_list[-5:] != '.list':\n # Linelist file is not specified, use internal line list;\n line_list = line_data.read_linelist(self.line_list, loggf_cut=loggf_cut, mode='npy')\n \n # Input EW into the linelist\n line_list = line_list[(line_list['wavelength'] >= self.start_wav) & (line_list['wavelength'] <= self.end_wav)].reset_index(drop=True)\n line_list.loc[1:, 'wavelength'] = -line_list.loc[1:, 'wavelength']\n line_list['EW'] = private.np.nan\n line_list.loc[0, 'EW'] = self.EW\n \n line_data.save_linelist(line_list, self.rundir_path + 'line.list', negative=True)\n self.line_list = 'line.list'\n elif self.line_list[-5:] == '.list':\n # Linelist file is specified; record linelist file name and copy to working directory.\n private.subprocess.run(['cp', self.line_list, self.rundir_path], encoding='UTF-8', stdout=private.subprocess.PIPE)\n self.line_list = self.line_list.split('/')[-1]\n # Input EW into the linelist\n line_list = line_data.read_linelist(self.rundir_path + self.line_list)\n line_list.loc[1:, 'wavelength'] = -line_list.loc[1:, 'wavelength']\n line_list['EW'] = private.np.nan\n line_list.loc[0, 'EW'] = self.EW\n line_data.save_linelist(line_list, self.rundir_path + 'line.list', negative=True)\n \n # Create parameter file.\n self.create_para_file(self.ele, atmosphere=atmosphere, lines=lines) \n \n def create_para_file(self, ele, atmosphere=1, lines=1, molecules=1, edge_width=0.5, step=0.005):\n '''\n Function for creating the parameter file of batch.par for blends.\n \n Parameters\n ----------\n ele : float\n Element of the target line.\n atmosphere : int, default 1\n The atmosphere value described in MOOG documention, section III.\n lines : int, default 1\n The lines value described in MOOG documention, section III.\n molecules : int, default 1\n The molecules value described in MOOG documention, section III.\n edge_width : float, defalut 0.5\n The width to be included in the fitting around the central wavelength.\n step : float\n The wavelength step size of the syntheses.\n '''\n MOOG_para_file = open(self.rundir_path + '/batch.par', 'w')\n # Parameter list of 
MOOG: standard output file (1), summary output file (2), smoothed output file (3),\n # begin wavelength, end wavelength, wavelength step;\n # smoothing function, Gaussian FWHM, vsini, limb darkening coefficient,\n # Macroturbulent FWHM, Lorentzian FWHM\n #MOOG_para_file = open('batch.par', 'w')\n MOOG_content = [\"blends\\n\",\n \"standard_out '{}'\\n\".format('MOOG.out1'),\n \"summary_out '{}'\\n\".format('MOOG.out2'),\n \"model_in '{}'\\n\".format(self.model_file),\n \"lines_in '{}'\\n\".format(self.line_list),\n \"atmosphere {}\\n\".format(atmosphere),\n \"lines {}\\n\".format(lines),\n \"molecules {}\\n\".format(molecules),\n \"terminal 'x11'\\n\",\n \"blenlimits\\n\",\n \" {} {} {:.1f}\".format(edge_width, step, ele)\n ]\n MOOG_para_file.writelines(MOOG_content)\n MOOG_para_file.close()\n \n def run_moog(self, output=False):\n '''\n Run MOOG and print the result if required.\n\n Parameters\n ----------\n output : bool, default False\n If True, print the output of MOOG.\n\n Returns\n ----------\n None. The files MOOG.out1 and MOOG.out2 will be saved in the pymoog working path.\n '''\n \n MOOG_run = private.subprocess.run([MOOG_path + '/MOOGSILENT'], stdout=private.subprocess.PIPE, cwd=self.rundir_path)\n\n \n MOOG_run = str(MOOG_run.stdout, encoding = \"utf-8\").split('\\n')\n MOOG_output = []\n for i in MOOG_run:\n if len(i) > 12:\n ansi_escape = private.re.compile(r'\\x1b\\[...H')\n temp = ansi_escape.sub('', i)\n ansi_escape = private.re.compile(r'\\x1b\\[....H')\n temp = ansi_escape.sub('', temp)\n ansi_escape = private.re.compile(r'\\x1b\\[H')\n temp = ansi_escape.sub('', temp)\n ansi_escape = private.re.compile(r'\\x1b\\[2J')\n MOOG_output.append(ansi_escape.sub('', temp))\n \n # if unlock:\n # self.unlock()\n \n if output:\n for i in MOOG_output:\n print(i)\n \n if 'ERROR' in ''.join(MOOG_run):\n raise ValueError('There was an error during the MOOG run.')\n\n def read_output(self, remove=True):\n '''\n Read the output of blends.\n\n Parameters\n ----------\n remove : bool, default True\n Whether to remove the working folder after reading.\n\n Returns\n ---------\n self.blends_s_df : pandas DataFrame\n A pandas DataFrame containing the per-line results of blends.\n '''\n file = open(self.rundir_path + 'MOOG.out2', 'r')\n blends_content = file.readlines()\n para = private.np.array(private.re.findall('[0-9]+.[0-9]+', blends_content[1]), dtype=float)\n\n sep_index = []\n for i in range(len(blends_content)):\n if 'wavelength' in blends_content[i]:\n begin_index = i\n elif 'average abundance' in blends_content[i]:\n end_index = i\n # print(begin_index, end_index)\n break\n\n blends_s_df = private.pd.DataFrame(private.np.array([ele.split() for ele in blends_content[begin_index+1:end_index]], dtype=float), columns=blends_content[begin_index].split())\n # Exclude the lines with no measurement.\n blends_s_df = blends_s_df[blends_s_df['abund'] != 999.99].reset_index(drop=True)\n\n self.blends_s_df = blends_s_df\n\n if remove:\n self.remove()","sub_path":"pymoog/blends.py","file_name":"blends.py","file_ext":"py","file_size_in_byte":12214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"537411062","text":"# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nfrom pants.base.payload 
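A hedged usage sketch for the pymoog blends class in the entry above (not part of the dataset record). It assumes pymoog and a local MOOG installation under ~/.pymoog; the stellar parameters, wavelength window, EW and element index are illustrative values only:

from pymoog import blends  # hypothetical import path for the module above
b = blends.blends(teff=5777, logg=4.44, m_h=0.0,
                  start_wav=6645.0, end_wav=6647.0,  # illustrative blended feature
                  EW=40.0, ele=26.0)                 # EW of the feature; Fe I assumed dominant
b.prepare_file()   # interpolate a model atmosphere, trim the linelist, write batch.par
b.run_moog()       # run MOOGSILENT inside the run directory
b.read_output()    # parse MOOG.out2 into b.blends_s_df
print(b.blends_s_df)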
import Payload\nfrom pants.build_graph.target import Target\n\nfrom pants.contrib.scalajs.targets.scala_js_target import ScalaJSTarget\n\n\nclass ScalaJSLibrary(ScalaJSTarget, Target):\n \"\"\"A library with scala sources, intended to be compiled to Javascript.\n\n Linking multiple libraries together into a shippable blob additionally requires a\n ScalaJSBinary target.\n \"\"\"\n\n def __init__(self, sources=None, address=None, payload=None, **kwargs):\n \"\"\"\n :param sources: Scala source that makes up this module; paths are relative to the BUILD\n file's directory.\n :type sources: `globs`, `rglobs` or a list of strings\n \"\"\"\n payload = payload or Payload()\n payload.add_fields({\n 'sources': self.create_sources_field(sources=sources,\n sources_rel_path=address.spec_path,\n key_arg='sources'),\n })\n super(ScalaJSLibrary, self).__init__(address=address, payload=payload, **kwargs)\n","sub_path":"contrib/scalajs/src/python/pants/contrib/scalajs/targets/scala_js_library.py","file_name":"scala_js_library.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"626540508","text":"# -*- coding: utf-8 -*-\n#\n# Copyright © 2013 The Spyder Development Team\n# Licensed under the terms of the MIT License\n# (see spyderlib/__init__.py for details)\n\n\"\"\"\nIPython configuration variables needed by Spyder\n\"\"\"\n\nfrom spyderlib.utils import programs\nfrom spyderlib import dependencies\nfrom spyderlib.baseconfig import _\n\n\n# Constants\nIPYTHON_REQVER = '>=1.0'\nZMQ_REQVER = '>=2.1.11'\nQTCONSOLE_REQVER = '>=4.0'\n\n\n# Dependencies\ndependencies.add(\"IPython\", _(\"IPython Console integration\"),\n required_version=IPYTHON_REQVER)\ndependencies.add(\"zmq\", _(\"IPython Console integration\"),\n required_version=ZMQ_REQVER)\n\n\n# Jupyter 4.0 requirements\nipy4_installed = programs.is_module_installed('IPython', '>=4.0')\nif ipy4_installed:\n dependencies.add(\"qtconsole\", _(\"IPython Console integration\"),\n required_version=QTCONSOLE_REQVER)\n\n\n# Auxiliary functions\ndef is_qtconsole_installed():\n pyzmq_installed = programs.is_module_installed('zmq')\n pygments_installed = programs.is_module_installed('pygments')\n ipyqt_installed = programs.is_module_installed('IPython.qt')\n\n if ipyqt_installed and pyzmq_installed and pygments_installed:\n if ipy4_installed:\n if programs.is_module_installed('qtconsole'):\n return True\n else:\n return False\n else:\n return True\n else:\n return False\n\n\n# Main check for IPython presence\nIPYTHON_QT_INSTALLED = is_qtconsole_installed()\n","sub_path":"lib/python2.7/site-packages/spyderlib/ipythonconfig.py","file_name":"ipythonconfig.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"47110213","text":"\"\"\"\nLanguage support for readalongs.g2p\n\"\"\"\n\nimport os\nimport glob\nimport json\nimport io\n\n\ndef base_dir():\n \"\"\"Get the default directory containing all languages.\"\"\"\n return os.path.dirname(__file__)\n\n\ndef lang_dir(lang, mapping_dir=None):\n \"\"\"Get the default resource directory for a language.\"\"\"\n lang = lang.replace('-', '_')\n lang = lang.replace('_ipa', '')\n if mapping_dir is None:\n mapping_dir = base_dir()\n return os.path.join(mapping_dir, lang)\n\n\ndef lang_dirs(mapping_dir=None):\n \"\"\"Iterate over the available languages and their resource directories.\"\"\"\n if mapping_dir is None:\n mapping_dir = 
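The qtconsole check in the spyderlib entry above reduces to asking whether a chain of optional modules is importable. A minimal sketch of that pattern using only the standard library (module names are illustrative; this is not spyderlib's API):

import importlib.util

def has_module(name):
    # find_spec locates a module on sys.path without importing it
    return importlib.util.find_spec(name) is not None

if has_module('zmq') and has_module('pygments') and has_module('qtconsole'):
    print('Qt console prerequisites appear satisfied')
else:
    print('at least one optional dependency is missing')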
base_dir()\n for name in os.listdir(mapping_dir):\n path = lang_dir(name, mapping_dir)\n # Make sure it is a directory with mapping (JSON) files\n if glob.glob(os.path.join(path, '*.json')):\n yield name, path\n\n\ndef get_mapping(src_lang, dst_lang, mapping_dir=None):\n \"\"\"Get mapping data based on filename conventions.\"\"\"\n path = os.path.join(lang_dir(src_lang, mapping_dir),\n '%s_to_%s.json' % (src_lang, dst_lang))\n return json.load(io.open(path))\n","sub_path":"readalongs/lang/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"160710897","text":"import numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import preprocessing\nfrom sklearn.metrics import accuracy_score\nimport os, sys\nfrom scipy import stats\n\n# fileName: the training or test file used to build the data set.\n# target: list that receives the y values (here, room occupancy), so this method effectively returns\n# two variables: a feature matrix (X) and a result list (y).\n# data_Amount caps how many rows are read; the default is 10,000,000 when no value is given.\ndef loadData(fileName, target, data_Amount=10000000):\n occupancy = list()\n temperature = list()\n humidity = list()\n light = list()\n co2 = list()\n humidity_Ratio = list()\n\n skipFirst = True\n with open(fileName, \"rt\") as f:\n for line in f:\n if skipFirst:\n skipFirst = False\n elif data_Amount == 0:\n break\n else:\n try:\n data_Amount -= 1\n occupancy.append(float(line.split(',')[7]))\n temperature.append(float(line.split(',')[2]))\n humidity.append(float(line.split(',')[3]))\n light.append(float(line.split(',')[4]))\n co2.append(float(line.split(',')[5]))\n humidity_Ratio.append(float(line.split(',')[6]))\n\n\n except ValueError:\n print(\"Error parsing on line\", line)\n\n\n # Test whether the method works\n #print(\"The number of data stored in survived:\\n\", len(survived))\n #print(\"Expected: 1001, 1000 data plus 1 heading 'survived'.\")\n\n # Test that all data have been stored into lists\n #print(len(Pclass), \" \", Pclass)\n #print(len(parch), \" \", parch)\n #print(len(fare), \" \", fare)\n\n # merge into one feature set\n #le = preprocessing.LabelEncoder()\n #le.fit(is_match)\n #print(list(le.classes_))\n #fitted = le.transform(is_match)\n\n features = np.column_stack((temperature, humidity, light, co2, humidity_Ratio))\n #features.append(Pclass)\n #features.append(fitted.tolist())\n #features.append(parch)\n #features.append(fare)\n\n #print(len(features[1]), \" \", features[1])\n target.extend(occupancy)\n return features\n\ndef LogReg():\n train_y = list()\n train_X = loadData('datatraining.txt', train_y, 5000)\n print(len(train_y))\n print(len(train_X))\n\n test_y = list()\n test_X = loadData('datatest.txt', test_y)\n\n #print(train_X, '/n', test_X)\n sc = StandardScaler()\n sc.fit(train_X)\n\n train_X_std = sc.transform(train_X)\n test_X_std = sc.transform(test_X)\n\n log_reg = LogisticRegression()\n log_reg.fit(train_X_std,train_y)\n\n y_pred = log_reg.predict(test_X_std)\n\n print(\"Std accuracy: {0: .2f}%\".format(accuracy_score(test_y, y_pred)*100))\n print(\"Std accuracy: {0: .4f}\".format(accuracy_score(test_y, y_pred)))\n\n log_reg2 = LogisticRegression()\n log_reg2.fit(train_X, train_y)\n\n y_pred_2 = log_reg2.predict(test_X)\n\n print(\"NonStd accuracy: {0: .2f}%\".format(accuracy_score(test_y, 
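A hedged sketch of how the readalongs helpers above compose (the language codes are hypothetical; a real get_mapping call needs a <src>_to_<dst>.json file in the source language's directory):

# Enumerate every language directory that ships JSON mappings.
for name, path in lang_dirs():
    print(name, path)
# mapping = get_mapping('atj', 'ipa')  # would read atj/atj_to_ipa.json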
y_pred_2) * 100))\n print(\"NonStd accuracy: {0: .4f}\".format(accuracy_score(test_y, y_pred_2)))\n\n #print(train_y)\n #print(test_y)\n\n\nif __name__ == '__main__':\n LogReg()\n","sub_path":"log_reg2.py","file_name":"log_reg2.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"620796308","text":"#Definition of inputs and outputs\n#==================================\n##BC=group\n##PG04_WaterQualityParameters_CoastColour_L1P=name\n##ParameterBoolean|Icol|Icol|false\n##ParameterBoolean|Calibration|Calibration|false\n##ParameterBoolean|Smile|Smile|true\n##ParameterBoolean|Equalization|Equalization|true\n##ParameterBoolean|IgnoreSeaIceClim|Ignore sea ice climatology|false\n##ParameterNumber|CloudBufferWidth|Insert size of cloud buffer in pixel|0|None|2\n##ParameterNumber|CloudScreeningAmbiguous|Cloud screening ambiguous threshold|0|None|1.4\n##ParameterNumber|CloudScreeningSure|Cloud screening sure threshold|0|None|1.8\n##ParameterNumber|GlintCloudThresholdAddition|Glint cloud screening addition|0|None|0.1\n##ParameterBoolean|OutputCloudProbabilityFeatureValue|Write cloud probability feature value|false\n\nimport os\nimport glob\nimport tempfile\n\ntempfolder = 'wq_scripts_'\n\ndef folder_create(tempfolder):\n try:\n tempdir = glob.glob(os.path.join(tempfile.gettempdir(), tempfolder + '*'))[0]\n return tempdir\n except:\n progress.setConsoleInfo('Temporary folder:' + tempfolder + ' does not exist and will be created.')\n tempfile.mkdtemp(prefix=tempfolder)\n tempdir = glob.glob(os.path.join(tempfile.gettempdir(), tempfolder + '*'))[0]\n return tempdir\n\ndef folder_check(tempfolder):\n try:\n tempdir = glob.glob(os.path.join(tempfile.gettempdir(), tempfolder + '*'))[0]\n return False\n except IndexError:\n progress.setConsoleInfo('ERROR: Temporary folder:' + tempfolder + ' could not be created. 
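The scaling discipline used in LogReg() above, isolated into a runnable sketch with toy data: the scaler is fit on the training data only and the same transform is then applied to the test data, so no test-set statistics leak into training.

import numpy as np
from sklearn.preprocessing import StandardScaler

train = np.array([[20.0, 500.0], [22.0, 700.0], [24.0, 900.0]])  # toy feature rows
test = np.array([[21.0, 600.0]])
sc = StandardScaler().fit(train)   # mean and scale come from train only
print(sc.transform(test))          # test reuses train's statistics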
Check for administration rights to create folder.')\n return True\n\ndef create_parameterfile(tempdir, Icol, Calibration, Smile, Equalization, IgnoreSeaIceClim, CloudBufferWidth, CloudScreeningAmbiguous, CloudScreeningSure, GlintCloudThresholdAddition, OutputCloudProbabilityFeatureValue):\n with open(tempdir + \"WaterQualityParameters01.txt\", \"w\") as text_file:\n text_file.write('icol='+ str(Icol).lower() + '\\n')\n text_file.write('calibration='+ str(Calibration).lower() + '\\n')\n text_file.write('smile='+ str(Smile).lower() + '\\n')\n text_file.write('equalization='+ str(Equalization).lower() + '\\n')\n text_file.write('ignoreSeaIceClim='+ str(IgnoreSeaIceClim).lower() + '\\n')\n text_file.write('cloudBufferWidth='+ str(CloudBufferWidth) + '\\n')\n text_file.write('cloudScreeningAmbiguous='+ str(CloudScreeningAmbiguous) + '\\n')\n text_file.write('cloudScreeningSure='+ str(CloudScreeningSure) + '\\n')\n text_file.write('glintCloudThresholdAddition='+ str(GlintCloudThresholdAddition) + '\\n')\n text_file.write('outputCloudProbabilityValue='+ str(OutputCloudProbabilityFeatureValue).lower() + '\\n')\n\ndef execution(tempfolder):\n tempdir = folder_create(tempfolder) + '/'\n if folder_check(tempfolder):\n return\n else:\n tempdir = glob.glob(os.path.join(tempfile.gettempdir(), tempfolder + '*'))[0] + '/'\n create_parameterfile(tempdir, Icol, Calibration, Smile, Equalization, IgnoreSeaIceClim, CloudBufferWidth, CloudScreeningAmbiguous, CloudScreeningSure, GlintCloudThresholdAddition, OutputCloudProbabilityFeatureValue)\n\nexecution(tempfolder)","sub_path":"scripts/GWA_TBX/PG04/PG04_WaterQualityParameters_01_CC_L1P.py","file_name":"PG04_WaterQualityParameters_01_CC_L1P.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"579810383","text":"from apriori import *\n\nif __name__ == \"__main__\":\n # list of datasets\n testFiles = [\"T10I2D100K\", \"T10I4D100K\", \"T20I2D100K\", \"T20I4D100K\", \"T20I6D100K\"]\n # list of support value\n support = [0.004, 0.006, 0.008]\n\n sc = SparkContext(appName=\"Spark Apriori\")\n spark = SparkSession(sc)\n schema = StructType([\n StructField(\"algorithm\", StringType(), False),\n StructField(\"datasets\", StringType(), False),\n StructField(\"support\", FloatType(), False)\n ])\n for i in range(5):\n schema.add(\"test{}\".format(i+1), FloatType(), True)\n experiments = []\n\n for f in testFiles:\n for s in support:\n times = []\n for i in range(5):\n start = time.time()\n apriori(sc, \"./data/{}.data\".format(f), \"./result/{}{}{}\".format(f, s, i+1), s)\n end = time.time()\n times.append(end - start)\n experiments.append([\"Apriori\", f, s] + times)\n df = spark.createDataFrame(experiments, schema)\n df.coalesce(1).write.mode(\"overwrite\").csv(\"./experiments/runtime{}\".format(f))\n experiments = []\n sc.stop()","sub_path":"src/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"645567742","text":"from peewee import *\n\ndatabase = SqliteDatabase(\"cache.db\")\n\n\nclass Video(Model):\n vidId = CharField(unique=True)\n title = CharField()\n description = CharField()\n channel = CharField()\n publishDate = DateTimeField()\n thumbnail = CharField()\n duration = IntegerField()\n\n class Meta:\n database = 
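A hedged sketch of the key=value serialization convention used by create_parameterfile above: Python booleans are lowercased so that a downstream reader expecting 'true'/'false' (an assumption here) parses them correctly. The file name is illustrative.

import os
import tempfile

params = {'icol': False, 'smile': True, 'cloudBufferWidth': 2}
path = os.path.join(tempfile.gettempdir(), 'wq_params_demo.txt')
with open(path, 'w') as fh:
    for key, value in params.items():
        fh.write('{}={}\n'.format(key, str(value).lower()))  # True -> 'true'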
database\n\n\ndatabase.connect()\ndatabase.create_tables([Video])\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"267805925","text":"# -*- coding: utf-8 -*-\n\nimport wx\nfrom wx.lib import masked\nimport burocracia\nimport datetime\nfrom models import *\nimport sys\nimport os\nimport codecs\n\nsetup_all()\n\nID_TOOLBAR_CONTRATO_NOVO = 1001\nID_TOOLBAR_CONTRATO_EDITAR = 1002\nID_TOOLBAR_CONTRATO_EXCLUIR = 1003\nID_TOOLBAR_CONTRATO_CRIAR_ARQUIVO = 1004\n\n\nclass WindowContrato(wx.MiniFrame):\n\n def __init__(self, parent):\n\n wx.MiniFrame.__init__(self, parent, id=wx.ID_ANY, size=(530, 280), pos=(300, 170), title=\"Contrato\", style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN)\n self.panelContrato = wx.Panel(self, wx.ID_ANY)\n\n self.vbox1 = wx.BoxSizer(wx.VERTICAL)\n\n self.toolBar = wx.ToolBar(self, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.TB_TEXT)\n\n self.toolBar.AddLabelTool(ID_TOOLBAR_CONTRATO_NOVO, \"Novo\", wx.Bitmap(\"./imagens/add.png\"), shortHelp='Adiciona novo contrato')\n self.toolBar.AddSeparator()\n self.toolBar.AddLabelTool(ID_TOOLBAR_CONTRATO_EDITAR, \"Editar\", wx.Bitmap(\"./imagens/edit.png\"), shortHelp='Edita contrato selecionado')\n self.toolBar.AddSeparator()\n self.toolBar.AddLabelTool(ID_TOOLBAR_CONTRATO_EXCLUIR, \"Remover\", wx.Bitmap(\"./imagens/remove.png\"), shortHelp='Exclui contrato selecionado')\n self.toolBar.AddSeparator()\n self.toolBar.AddLabelTool(ID_TOOLBAR_CONTRATO_CRIAR_ARQUIVO, \"Gerar Arquivo\", wx.Bitmap(\"./imagens/file.png\"), shortHelp='Gera arquivo de contrato')\n self.toolBar.AddSeparator()\n self.toolBar.AddSeparator()\n self.toolBar.Realize()\n self.SetToolBar(self.toolBar)\n\n self.choicesCompetencias = [u'Orçamento', u'Janeiro', u'Fevereiro', u'Março', u'Abril', u'Maio', u'Junho', u'Julho', u'Agosto', u'Setembro',\n u'Outubro', u'Novembro', u'Dezembro']\n\n self.cbCompetenciaForView = wx.ComboBox(self.panelContrato, -1, pos=(1, 5), size=(200, -1), choices=self.choicesCompetencias, style=wx.CB_READONLY)\n self.cbCompetenciaForView.Bind(wx.EVT_COMBOBOX, self.insereContratoListCtrl)\n\n #ListCtrl\n self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)\n self.contratoListCtrl = wx.ListCtrl(self.panelContrato, wx.ID_ANY, pos=(0, 30), size=(525, 200), style=wx.LC_REPORT)\n self.contratoListCtrl.InsertColumn(0, u\"Número do Contrato\", width=115)\n self.contratoListCtrl.InsertColumn(1, u\"Nome Contratado\", width=155)\n self.contratoListCtrl.InsertColumn(2, u\"Objetivo do Contrato\", width=250)\n self.contratoListCtrl.InsertColumn(3, u'', width=0)\n self.contratoListCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.capturaIdItemSelecionado)\n self.contratoListCtrl.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.anulaIdItemSelecionado)\n self.idSelecionado = None\n\n self.hbox1.Add(self.contratoListCtrl, 1, wx.EXPAND)\n #Fim ListCtrl\n #self.insereContratoListCtrl()\n\n self.siglasContratos = {u'Termo de Contrato': 'CT', u'Termo Aditivo ao Contrato': 'TACT', u'Termo de Re-Ratificação de Contrato':'TRRCT', \n u'Termo de Distrato de Contrato':'TDCT', u'Termo de Rescisão de Contrato':'TRCT', u'Termo Concessão de Uso':'TCU',\n u'Termo de Aditivo de Concessão de Uso':'TACU', u'Termo de Permissão de Uso':'TPU', u'Termo Aditivo de Permissão de Uso':'TAPU',\n u'Termo de Autorização de Uso':'TAU', u'Termo Aditivo de Autorização de Uso':'TAAU', u'Termo de Cessão':'TC', u'Termo Aditivo a 
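A hedged usage sketch for the peewee Video model defined above (field values are illustrative; the import path assumes the module is saved as models.py). get_or_create respects the unique vidId constraint by fetching the existing row instead of inserting a duplicate:

import datetime
from models import Video  # hypothetical import of the module above

video, created = Video.get_or_create(
    vidId='abc123',
    defaults={'title': 'Demo', 'description': '', 'channel': 'demo',
              'publishDate': datetime.datetime(2020, 1, 1),
              'thumbnail': '', 'duration': 60})
print(created, Video.select().count())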
Cessão':'TAC',\n u'Termo de Compromisso':'TCO', u'Termo Aditivo ao Compromisso':'TACO', u'Termo de Direito Real de Uso':'TDRU', \n u'Termo Aditivo ao Direito Real de Uso':'TADU', u'Termo de Doação':'TD', u'Carta Contrato':'CACT', u'Ordem de Serviços':'OS',\n u'Termo Aditivo a Ordem de Serviços':'TAOS', u'Termo de Revogação de Autorização de Uso':'TRTA', u'Termo de Adesão ao Contrato':'TA',\n u'Termo de Outorga':'TOU', u'Termo Aditivo de Outorga':'TAOU', u'Termo de Ex-Ofício':'TEXO', u'Termo Aditivo de Carta Contrato':'TACC',\n u'Termo de Cooperação Técnica':'TCT', u'Termo Aditivo de Cooperação Técnica':'ATCT', u'Termo de Ordem de Serviços':'TOS',\n u'Termo de Recebimento de Auxílio Aluguel':'TRAA', u'Termo de Recebimento de Cheque Moradia':'TRCM', u'Termo de Recebimento de Indenização':'TRIN',\n u'Termo de Quitação de Contrato':'TQC', u'Protocolo de Intenções':'PI', u'Termo Aditivo de Protocolo de Intenções':'TAPI',\n u'Termo Aditivo de Doação':'TAD', u'Apostila de Retificação de Contrato':'ARC', u'Termo de Contrato de Gestão':'TCG', \n u'Termo Aditivo de Contrato de Gestão':'TACG', u'Termo de Rescisão de Cessão':'TRCES', u'Termo de Apostilamento de Contrato':'TAPC',\n u'Apólice de contratação de serviços de seguro':'ASS', u'Termo Aditivo de Apólice de contratação de serviços de seguro':'TASS'}\n\n self.choicesTipoContrato = [u'Termo de Contrato', u'Termo Aditivo ao Contrato', u'Termo de Re-Ratificação de Contrato', u'Termo de Distrato de Contrato',\n u'Termo de Rescisão de Contrato', u'Termo Concessão de Uso', u'Termo de Aditivo de Concessão de Uso', u'Termo de Permissão de Uso',\n u'Termo Aditivo de Permissão de Uso', u'Termo de Autorização de Uso', u'Termo Aditivo de Autorização de Uso', u'Termo de Cessão',\n u'Termo Aditivo a Cessão', u'Termo de Compromisso', u'Termo Aditivo ao Compromisso', u'Termo de Direito Real de Uso',\n u'Termo Aditivo ao Direito Real de Uso', u'Termo de Doação', u'Carta Contrato', u'Ordem de Serviços',\n u'Termo Aditivo a Ordem de Serviços', u'Termo de Revogação de Autorização de Uso', u'Termo de Adesão ao Contrato',\n u'Termo de Outorga', u'Termo Aditivo de Outorga', u'Termo de Ex-Ofício', u'Termo Aditivo de Carta Contrato',\n u'Termo de Cooperação Técnica', u'Termo Aditivo de Cooperação Técnica', u'Termo de Ordem de Serviços',\n u'Termo de Recebimento de Auxílio Aluguel', u'Termo de Recebimento de Cheque Moradia', u'Termo de Recebimento de Indenização',\n u'Termo de Quitação de Contrato', u'Protocolo de Intenções', u'Termo Aditivo de Protocolo de Intenções',\n u'Termo Aditivo de Doação', u'Apostila de Retificação de Contrato', u'Termo de Contrato de Gestão',\n u'Termo Aditivo de Contrato de Gestão', u'Termo de Rescisão de Cessão', u'Termo de Apostilamento de Contrato',\n u'Apólice de contratação de serviços de seguro', u'Termo Aditivo de Apólice de contratação de serviços de seguro']\n\n self.choicesRecebeValor = [u'S', u'N']\n\n self.choicesTipoJuridicoContratado = [u'Física', u'Jurídica', u'Outros']\n\n self.choicesCodigoMoeda = [u'Real', u'Dolar', u'Outra Moeda']\n\n self.tipoAditivo = [u'Acréscimo de valor', u'Decréscimo de valor', u'Não houve alteração de valor']\n\n #Binds\n\n self.Bind(wx.EVT_CLOSE, self.quit)\n self.Bind(wx.EVT_MENU, self.novoContrato, id=ID_TOOLBAR_CONTRATO_NOVO)\n self.Bind(wx.EVT_MENU, lambda event: self.editaWindowContrato(event, self.idSelecionado), id=ID_TOOLBAR_CONTRATO_EDITAR)\n self.Bind(wx.EVT_MENU, lambda event: self.excluiContrato(event, self.idSelecionado), id=ID_TOOLBAR_CONTRATO_EXCLUIR)\n self.Bind(wx.EVT_MENU, 
self.geraArquivoWindow, id=ID_TOOLBAR_CONTRATO_CRIAR_ARQUIVO)\n\n #Fim Binds\n\n self.Centre()\n self.MakeModal(True)\n self.Show()\n\n def anulaIdItemSelecionado(self, event):\n\n self.idSelecionado = None\n\n def capturaIdItemSelecionado(self, event):\n\n self.idSelecionado = self.contratoListCtrl.GetItem(event.GetIndex(), 3).GetText()\n\n def quit(self, event):\n\n self.MakeModal(False)\n self.Destroy()\n\n def toolBarControler(self, novo=True, editar=True, remover=True, gerar=True):\n\n self.toolBar.EnableTool(ID_TOOLBAR_CONTRATO_NOVO, novo)\n self.toolBar.EnableTool(ID_TOOLBAR_CONTRATO_EDITAR, editar)\n self.toolBar.EnableTool(ID_TOOLBAR_CONTRATO_EXCLUIR, remover)\n self.toolBar.EnableTool(ID_TOOLBAR_CONTRATO_CRIAR_ARQUIVO, gerar)\n\n def novoContrato(self, event):\n\n self.toolBarControler(False, False, False, False)\n\n self.windowNovoContrato = wx.MiniFrame(parent=self, id=wx.ID_ANY, size=(680, 850), pos=(300, 170), title=\"Novo - Contrato\", style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN)\n self.panelNovoContrato = wx.Panel(self.windowNovoContrato, wx.ID_ANY)\n\n self.tcId = wx.TextCtrl(self.panelNovoContrato, -1, pos=(0, 0), size=(0, 0))\n self.tcId.SetValue('0')\n\n self.stCompetencia = wx.StaticText(self.panelNovoContrato, -1, u'Competência', pos=(10, 0), style=wx.ALIGN_LEFT)\n self.cbCompetencia = wx.ComboBox(self.panelNovoContrato, -1, pos=(10, 20), size=(250, -1), choices=self.choicesCompetencias, style=wx.CB_READONLY)\n\n wx.StaticBox(self.panelNovoContrato, -1, pos=(1, 42), size=(660, 60))\n\n self.stTipoContrato = wx.StaticText(self.panelNovoContrato, -1, u'Tipo', pos=(10, 50), style=wx.ALIGN_LEFT)\n self.cbTipoContrato = wx.ComboBox(self.panelNovoContrato, -1, pos=(10, 70), size=(400, -1), choices=self.choicesTipoContrato, style=wx.CB_READONLY)\n self.cbTipoContrato.Bind(wx.EVT_COMBOBOX, self.insereSiglaContrato)\n\n self.stRecebeValor = wx.StaticText(self.panelNovoContrato, -1, u'Recebe Valor ?', pos=(430, 50), style=wx.ALIGN_LEFT)\n self.cbRecebeValor = wx.ComboBox(self.panelNovoContrato, -1, pos=(430, 70), size=(50, -1), choices=self.choicesRecebeValor, style=wx.CB_READONLY)\n self.cbRecebeValor.Bind(wx.EVT_COMBOBOX, self.liberaValor)\n\n wx.StaticBox(self.panelNovoContrato, -1, pos=(1, 105), size=(660, 60))\n\n self.stTipoJuridicoContratado = wx.StaticText(self.panelNovoContrato, -1, u'Tipo Pessoa', pos=(10, 115), style=wx.ALIGN_LEFT)\n self.cbTipoJuridicoContratado = wx.ComboBox(self.panelNovoContrato, -1, pos=(10, 135), size=(150, -1), choices=self.choicesTipoJuridicoContratado, style=wx.CB_READONLY)\n self.cbTipoJuridicoContratado.Bind(wx.EVT_COMBOBOX, self.definirCampoCic)\n\n self.stCicContratado = wx.StaticText(self.panelNovoContrato, -1, u'CNPJ ou CPF', pos=(180, 115), style=wx.ALIGN_LEFT)\n self.tcCicContratado = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"\")\n self.tcCicContratado.SetSize((140, -1))\n self.tcCicContratado.SetPosition((180, 135))\n self.tcCicContratado.SetEditable(False)\n\n self.stNomeContratado = wx.StaticText(self.panelNovoContrato, -1, u'Nome', pos=(340, 115), style=wx.ALIGN_LEFT)\n self.tcNomeContratado = wx.TextCtrl(self.panelNovoContrato, -1, pos=(340, 135), size=(310, -1), style=wx.ALIGN_LEFT)\n self.tcNomeContratado.SetMaxLength(50)\n\n wx.StaticBox(self.panelNovoContrato, -1, pos=(1, 165), size=(660, 170))\n\n self.stNumeroContrato = wx.StaticText(self.panelNovoContrato, -1, u'Número', pos=(10, 180), style=wx.ALIGN_LEFT)\n self.tcNumeroContratado = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 200), 
size=(100, -1), style=wx.ALIGN_LEFT)\n self.tcNumeroContratado.SetMaxLength(16)\n\n self.stObjetivoContrato = wx.StaticText(self.panelNovoContrato, -1, u'Objetivo', pos=(130, 180), style=wx.ALIGN_LEFT)\n self.tcObjetivoContratado = wx.TextCtrl(self.panelNovoContrato, -1, pos=(130, 200), size=(350, -1), style=wx.ALIGN_LEFT)\n self.tcObjetivoContratado.SetMaxLength(300)\n\n self.stCnpjOriginal = wx.StaticText(self.panelNovoContrato, -1, u'CNPJ da UG do contrato orig.', pos=(500, 180), style=wx.ALIGN_LEFT)\n self.tcCnpjOriginal = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##.###.###/####-##\")\n self.tcCnpjOriginal.SetSize((140, -1))\n self.tcCnpjOriginal.SetPosition((500, 200))\n \n self.stCodigoMoeda = wx.StaticText(self.panelNovoContrato, -1, u'Tipo de Moeda', pos=(10, 230), style=wx.ALIGN_LEFT)\n self.cbCodigoMoeda = wx.ComboBox(self.panelNovoContrato, -1, pos=(10, 250), size=(90, -1), choices=self.choicesCodigoMoeda, style=wx.CB_READONLY)\n self.cbCodigoMoeda.Disable()\n\n self.stValorContrato = wx.StaticText(self.panelNovoContrato, -1, u'Valor', pos=(130, 230), style=wx.ALIGN_LEFT)\n self.tcValorContrato = wx.lib.masked.numctrl.NumCtrl(id=-1, parent=self.panelNovoContrato, pos=wx.Point(130, 250), style=0, value=0)\n self.tcValorContrato.SetFractionWidth(2)\n self.tcValorContrato.SetGroupChar(u\"#\")\n self.tcValorContrato.SetDecimalChar(u\",\")\n self.tcValorContrato.SetGroupChar(u\".\")\n self.tcValorContrato.SetAllowNegative(False)\n self.tcValorContrato.Disable()\n\n self.stDataAssinaturaContrato = wx.StaticText(self.panelNovoContrato, -1, u'Data de Assinatura', pos=(290, 230), style=wx.ALIGN_LEFT)\n self.tcDataAssinaturaContrato = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataAssinaturaContrato.SetSize((80, -1))\n self.tcDataAssinaturaContrato.SetPosition((290, 250))\n\n self.stDataVencimentoContrato = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(410, 230), style=wx.ALIGN_LEFT)\n self.tcDataVencimentoContrato = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoContrato.SetSize((80, -1))\n self.tcDataVencimentoContrato.SetPosition((410, 250))\n\n self.stDataCompetencia = wx.StaticText(self.panelNovoContrato, -1, u'Data Competência (AAAA/MM)', pos=(500, 230), style=wx.ALIGN_LEFT)\n self.tcDataCompetencia = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"####/##\")\n self.tcDataCompetencia.SetSize((80, -1))\n self.tcDataCompetencia.SetPosition((500, 250)) \n\n self.stNumeroProcessoLicitatorio = wx.StaticText(self.panelNovoContrato, -1, u'Número do Processo Licitatório', pos=(10, 280), style=wx.ALIGN_LEFT)\n self.tcNumeroProcessoLicitatorio = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 300), size=(200, -1), style=wx.ALIGN_LEFT)\n self.tcNumeroProcessoLicitatorio.SetMaxLength(16)\n\n self.stNumeroDiarioOficial = wx.StaticText(self.panelNovoContrato, -1, u'Número Diário Oficial', pos=(240, 280), style=wx.ALIGN_LEFT)\n self.tcNumeroDiarioOficial = wx.TextCtrl(self.panelNovoContrato, -1, pos=(240, 300), size=(100, -1), style=wx.ALIGN_LEFT)\n self.tcNumeroDiarioOficial.SetMaxLength(6)\n self.tcNumeroDiarioOficial.Bind(wx.EVT_CHAR, self.escapaChar)\n\n self.stDataPublicacaoContrato = wx.StaticText(self.panelNovoContrato, -1, u'Data de Publicação', pos=(380, 280), style=wx.ALIGN_LEFT)\n self.tcDataPublicacaoContrato = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataPublicacaoContrato.SetSize((80, -1))\n self.tcDataPublicacaoContrato.SetPosition((380, 300))\n\n 
wx.StaticBox(self.panelNovoContrato, -1, pos=(1, 335), size=(660, 360))\n\n self.stNumeroCertidaoINSS = wx.StaticText(self.panelNovoContrato, -1, u'INSS', pos=(10, 350), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoINSS = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 370), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoINSS.SetMaxLength(60)\n\n self.stDataCertidaoINSS = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 350), style= wx.ALIGN_LEFT)\n self.tcDataCertidaoINSS = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataCertidaoINSS.SetSize((80, -1))\n self.tcDataCertidaoINSS.SetPosition((150, 370))\n\n self.stDataValidadeINSS = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 350), style= wx.ALIGN_LEFT)\n self.tcDataValidadeINSS = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataValidadeINSS.SetSize((80, -1))\n self.tcDataValidadeINSS.SetPosition((280, 370))\n\n self.stNumeroCertidaoFGTS = wx.StaticText(self.panelNovoContrato, -1, u'FGTS', pos=(10, 400), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFGTS = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 420), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFGTS.SetMaxLength(60)\n \n self.stDataCertidaoFGTS = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 400), style= wx.ALIGN_LEFT)\n self.tcDataCertidaoFGTS = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataCertidaoFGTS.SetSize((80, -1))\n self.tcDataCertidaoFGTS.SetPosition((150, 420))\n\n self.stDataValidadeFGTS = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 400), style= wx.ALIGN_LEFT)\n self.tcDataValidadeFGTS = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataValidadeFGTS.SetSize((80, -1))\n self.tcDataValidadeFGTS.SetPosition((280, 420))\n\n self.stNumeroCertidaoFazendaEstadual = wx.StaticText(self.panelNovoContrato, -1, u'Fazenda Estadual', pos=(10, 450), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaEstadual = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 470), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaEstadual.SetMaxLength(60)\n \n self.stDataEmissaoFazendaEstadual = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 450), style= wx.ALIGN_LEFT)\n self.tcDataEmissaoFazendaEstadual = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataEmissaoFazendaEstadual.SetSize((80, -1))\n self.tcDataEmissaoFazendaEstadual.SetPosition((150, 470))\n\n self.stDataVencimentoFazendaEstadual = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 450), style= wx.ALIGN_LEFT)\n self.tcDataVencimentoFazendaEstadual = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoFazendaEstadual.SetSize((80, -1))\n self.tcDataVencimentoFazendaEstadual.SetPosition((280, 470))\n\n self.stNumeroCertidaoFazendaMunicipal = wx.StaticText(self.panelNovoContrato, -1, u'Fazenda Municipal', pos=(10, 500), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaMunicipal = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 520), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaMunicipal.SetMaxLength(60)\n \n self.stDataEmissaoFazendaMunicipal = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 500), style= wx.ALIGN_LEFT)\n self.tcDataEmissaoFazendaMunicipal = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n 
self.tcDataEmissaoFazendaMunicipal.SetSize((80, -1))\n self.tcDataEmissaoFazendaMunicipal.SetPosition((150, 520))\n\n self.stDataVencimentoFazendaMunicipal = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 500), style= wx.ALIGN_LEFT)\n self.tcDataVencimentoFazendaMunicipal = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoFazendaMunicipal.SetSize((80, -1))\n self.tcDataVencimentoFazendaMunicipal.SetPosition((280, 520))\n\n self.stNumeroCertidaoFazendaFederal = wx.StaticText(self.panelNovoContrato, -1, u'Fazenda Federal', pos=(10, 550), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaFederal = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 570), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaFederal.SetMaxLength(60)\n \n self.stDataEmissaoFazendaFederal = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 550), style= wx.ALIGN_LEFT)\n self.tcDataEmissaoFazendaFederal = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataEmissaoFazendaFederal.SetSize((80, -1))\n self.tcDataEmissaoFazendaFederal.SetPosition((150, 570))\n\n self.stDataVencimentoFazendaFederal = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 550), style= wx.ALIGN_LEFT)\n self.tcDataVencimentoFazendaFederal = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoFazendaFederal.SetSize((80, -1))\n self.tcDataVencimentoFazendaFederal.SetPosition((280, 570))\n\n self.stNumeroCertidaoCNDT = wx.StaticText(self.panelNovoContrato, -1, u'CNDT', pos=(10, 600), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoCNDT = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 620), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoCNDT.SetMaxLength(60)\n \n self.stDataEmissaoCNDT = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 600), style= wx.ALIGN_LEFT)\n self.tcDataEmissaoCNDT = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataEmissaoCNDT.SetSize((80, -1))\n self.tcDataEmissaoCNDT.SetPosition((150, 620))\n\n self.stDataVencimentoCNDT = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 600), style= wx.ALIGN_LEFT)\n self.tcDataVencimentoCNDT = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoCNDT.SetSize((80, -1))\n self.tcDataVencimentoCNDT.SetPosition((280, 620))\n\n self.stNumeroCertidaoOutras = wx.StaticText(self.panelNovoContrato, -1, u'Outras', pos=(10, 650), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoOutras = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 670), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoOutras.SetMaxLength(60)\n \n self.stDataEmissaoOutras = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 650), style= wx.ALIGN_LEFT)\n self.tcDataEmissaoOutras = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataEmissaoOutras.SetSize((80, -1))\n self.tcDataEmissaoOutras.SetPosition((150, 670))\n\n self.stDataVencimentoOutras = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 650), style= wx.ALIGN_LEFT)\n self.tcDataVencimentoOutras = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoOutras.SetSize((80, -1))\n self.tcDataVencimentoOutras.SetPosition((280, 670))\n\n wx.StaticBox(self.panelNovoContrato, -1, pos=(1, 700), size=(660, 65))\n\n self.stNumeroContratoSuperior = wx.StaticText(self.panelNovoContrato, -1, 
u'Número do Contrato Superior', pos=(10, 715), style=wx.ALIGN_LEFT)\n self.tcNumeroContratoSuperior = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 735), size=(200, -1), style=wx.ALIGN_LEFT)\n self.tcNumeroContratoSuperior.SetMaxLength(16)\n\n self.stTipoAditivo = wx.StaticText(self.panelNovoContrato, -1, u'Tipo do Aditivo', pos=(240, 715), style=wx.ALIGN_LEFT)\n self.tcTipoAditivo = wx.ComboBox(self.panelNovoContrato, -1, pos=(240, 735), size=(180, -1), choices=self.tipoAditivo, style=wx.CB_READONLY)\n\n self.btnSalvar = wx.Button(self.panelNovoContrato, -1, \"Salvar\", pos=(230, 790),size=(-1,18))\n self.btnCancelar = wx.Button(self.panelNovoContrato, -1, \"Cancelar\", pos=(350, 790),size=(-1,18))\n\n self.windowNovoContrato.Centre()\n self.windowNovoContrato.Show()\n\n #Bind\n self.btnCancelar.Bind(wx.EVT_BUTTON, self.quitContratoNovo)\n self.btnSalvar.Bind(wx.EVT_BUTTON, self.salvarContrato)\n self.windowNovoContrato.Bind(wx.EVT_CLOSE, self.quitContratoNovo)\n #Fim Bind\n\n def liberaValor(self, event):\n\n if self.cbRecebeValor.GetValue() == 'S':\n self.cbCodigoMoeda.Enable()\n self.tcValorContrato.Enable()\n else:\n self.cbCodigoMoeda.Disable()\n self.cbCodigoMoeda.SetSelection(-1)\n self.tcValorContrato.Disable()\n self.tcValorContrato.SetValue(\"0\")\n\n def salvarContrato(self, event):\n\n if self.valida():\n Contrato(tipoContrato=unicode(self.cbTipoContrato.GetValue()),\n recebeValor=unicode(self.cbRecebeValor.GetValue()),\n tipoJuridicoContratado=unicode(self.cbTipoJuridicoContratado.GetValue()),\n cicContratado=unicode(self.tcCicContratado.GetValue()),\n nomeContratado=unicode(self.tcNomeContratado.GetValue()),\n numeroContrato=unicode(self.tcNumeroContratado.GetValue()),\n objetivoContrato=unicode(self.tcObjetivoContratado.GetValue()),\n codigoMoeda=unicode(self.cbCodigoMoeda.GetValue()),\n valorContrato=unicode(self.tcValorContrato.GetValue()),\n dataAssinaturaContrato=unicode(self.tcDataAssinaturaContrato.GetValue()),\n dataVencimentoContrato=unicode(self.tcDataVencimentoContrato.GetValue()),\n numeroProcessoLicitatorio=unicode(self.tcNumeroProcessoLicitatorio.GetValue()),\n numeroDiarioOficial=unicode(self.tcNumeroDiarioOficial.GetValue()),\n dataPublicacaoContrato=unicode(self.tcDataPublicacaoContrato.GetValue()),\n numeroCertidaoINSS=unicode(self.tcNumeroCertidaoINSS.GetValue()),\n dataCertidaoINSS=unicode(self.tcDataCertidaoINSS.GetValue()),\n dataValidadeINSS=unicode(self.tcDataValidadeINSS.GetValue()),\n numeroCertidaoFGTS=unicode(self.tcNumeroCertidaoFGTS.GetValue()),\n dataCertidaoFGTS=unicode(self.tcDataCertidaoFGTS.GetValue()),\n dataValidadeFGTS=unicode(self.tcDataValidadeFGTS.GetValue()),\n numeroCertidaoFazendaEstadual=unicode(self.tcNumeroCertidaoFazendaEstadual.GetValue()),\n dataCertidaoFazendaEstadual=unicode(self.tcDataEmissaoFazendaEstadual.GetValue()),\n dataValidadeFazendaEstadual=unicode(self.tcDataVencimentoFazendaEstadual.GetValue()),\n numeroCertidaoFazendaMunicipal=unicode(self.tcNumeroCertidaoFazendaMunicipal.GetValue()),\n dataCertidaoFazendaMunicipal=unicode(self.tcDataEmissaoFazendaMunicipal.GetValue()),\n dataValidadeFazendaMunicipal=unicode(self.tcDataVencimentoFazendaMunicipal.GetValue()),\n numeroCertidaoFazendaFederal=unicode(self.tcNumeroCertidaoFazendaFederal.GetValue()),\n dataCertidaoFazendaFederal=unicode(self.tcDataEmissaoFazendaFederal.GetValue()),\n dataValidadeFazendaFederal=unicode(self.tcDataVencimentoFazendaFederal.GetValue()),\n numeroCertidaoCNDT=unicode(self.tcNumeroCertidaoCNDT.GetValue()),\n 
dataCertidaoCNDT=unicode(self.tcDataEmissaoCNDT.GetValue()),\n dataValidadeCNDT=unicode(self.tcDataVencimentoCNDT.GetValue()),\n numeroCertidaoOutras=unicode(self.tcNumeroCertidaoOutras.GetValue()),\n dataCertidaoOutras=unicode(self.tcDataEmissaoOutras.GetValue()),\n dataValidadeOutras=unicode(self.tcDataVencimentoOutras.GetValue()),\n numeroContratoAnterior=unicode(self.tcNumeroContratoSuperior.GetValue()),\n tipoDoAditivo=unicode(self.tcTipoAditivo.GetValue()),\n cnpjOriginal=unicode(self.tcCnpjOriginal.GetValue()),\n dataCompetencia=unicode(self.tcDataCompetencia.GetValue()),\n competencia=unicode(self.cbCompetencia.GetValue()),\n )\n session.commit()\n self.message = wx.MessageDialog(None, u'Contrato salvo com sucesso!', 'Info', wx.OK)\n self.message.ShowModal()\n self.insereContratoListCtrl(None)\n self.windowNovoContrato.Close()\n\n def quitContratoNovo(self, event):\n\n self.toolBarControler(True, True, True, True)\n self.windowNovoContrato.Destroy()\n\n def escapaChar(self, event):\n\n if event.GetKeyCode() < 256:\n\n if chr(event.GetKeyCode()).isdigit() or event.GetKeyCode() == 8 or event.GetKeyCode() == 127:\n event.Skip()\n else:\n event.Skip()\n\n def definirCampoCic(self, event):\n\n if event.GetString() == u\"Física\":\n\n self.tcCicContratado.SetValue('')\n self.tcCicContratado.SetMask((\"###.###.###-##\"))\n self.tcCicContratado.SetEditable(True)\n\n elif event.GetString() == u\"Jurídica\":\n\n self.tcCicContratado.SetValue('')\n self.tcCicContratado.SetMask((\"##.###.###/####-##\"))\n self.tcCicContratado.SetEditable(True)\n else:\n\n self.tcCicContratado.SetValue('')\n self.tcCicContratado.SetMask(('###############'))\n self.tcCicContratado.SetMask(('##############'))\n self.tcCicContratado.SetEditable(True)\n\n def editaWindowContrato(self, event, idContrato):\n\n if idContrato is None:\n self.message = wx.MessageDialog(None, u'Nenhum contrato foi selecionado! 
Selecione um na lista!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n self.toolBarControler(False, False, False, False)\n\n self.contrato = Contrato.query.filter_by(id=idContrato).first()\n\n self.windowEditaContrato = wx.MiniFrame(parent=self, id=wx.ID_ANY, size=(680, 850), pos=(300, 170), title=u\"Editar - Contrato\", style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN)\n self.panelNovoContrato = wx.Panel(self.windowEditaContrato, wx.ID_ANY)\n\n self.tcId = wx.TextCtrl(self.panelNovoContrato, -1, pos=(0, 0), size=(0, 0))\n self.tcId.SetValue(unicode(self.contrato.id))\n\n self.stCompetencia = wx.StaticText(self.panelNovoContrato, -1, u'Competência', pos=(10, 0), style=wx.ALIGN_LEFT)\n self.cbCompetencia = wx.ComboBox(self.panelNovoContrato, -1, pos=(10, 20), size=(250, -1), choices=self.choicesCompetencias, style=wx.CB_READONLY)\n self.cbCompetencia.SetValue(self.contrato.competencia)\n\n wx.StaticBox(self.panelNovoContrato, -1, pos=(1, 40), size=(660, 60))\n\n self.stTipoContrato = wx.StaticText(self.panelNovoContrato, -1, u'Tipo', pos=(10, 50), style=wx.ALIGN_LEFT)\n self.cbTipoContrato = wx.ComboBox(self.panelNovoContrato, -1, pos=(10, 70), size=(400, -1), choices=self.choicesTipoContrato, style=wx.CB_READONLY)\n self.cbTipoContrato.SetValue(self.contrato.tipoContrato)\n self.cbTipoContrato.Bind(wx.EVT_COMBOBOX, self.editaInsereSiglaContrato)\n\n self.stRecebeValor = wx.StaticText(self.panelNovoContrato, -1, u'Recebe Valor ?', pos=(430, 50), style=wx.ALIGN_LEFT)\n self.cbRecebeValor = wx.ComboBox(self.panelNovoContrato, -1, pos=(430, 70), size=(50, -1), choices=self.choicesRecebeValor, style=wx.CB_READONLY)\n self.cbRecebeValor.Bind(wx.EVT_COMBOBOX, self.liberaValor)\n self.cbRecebeValor.SetValue(self.contrato.recebeValor)\n\n wx.StaticBox(self.panelNovoContrato, -1, pos=(1, 105), size=(660, 60))\n\n self.stTipoJuridicoContratado = wx.StaticText(self.panelNovoContrato, -1, u'Tipo Pessoa', pos=(10, 115), style=wx.ALIGN_LEFT)\n self.cbTipoJuridicoContratado = wx.ComboBox(self.panelNovoContrato, -1, pos=(10, 135), size=(150, -1), choices=self.choicesTipoJuridicoContratado, style=wx.CB_READONLY)\n self.cbTipoJuridicoContratado.Bind(wx.EVT_COMBOBOX, self.definirCampoCic)\n self.cbTipoJuridicoContratado.SetValue(self.contrato.tipoJuridicoContratado)\n\n self.stCicContratado = wx.StaticText(self.panelNovoContrato, -1, u'CNPJ ou CPF', pos=(180, 115), style=wx.ALIGN_LEFT)\n self.tcCicContratado = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"\")\n self.tcCicContratado.SetSize((140, -1))\n self.tcCicContratado.SetPosition((180, 135))\n self.tcCicContratado.SetEditable(True)\n self.tcCicContratado.SetValue(self.contrato.cicContratado)\n\n self.stNomeContratado = wx.StaticText(self.panelNovoContrato, -1, u'Nome', pos=(340, 115), style=wx.ALIGN_LEFT)\n self.tcNomeContratado = wx.TextCtrl(self.panelNovoContrato, -1, pos=(340, 135), size=(310, -1), style=wx.ALIGN_LEFT)\n self.tcNomeContratado.SetMaxLength(50)\n self.tcNomeContratado.SetValue(self.contrato.nomeContratado)\n\n wx.StaticBox(self.panelNovoContrato, -1, pos=(1, 165), size=(660, 170))\n\n self.stNumeroContrato = wx.StaticText(self.panelNovoContrato, -1, u'Número', pos=(10, 180), style=wx.ALIGN_LEFT)\n self.tcNumeroContratado = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 200), size=(100, -1), style=wx.ALIGN_LEFT)\n self.tcNumeroContratado.SetMaxLength(16)\n self.tcNumeroContratado.SetValue(self.contrato.numeroContrato)\n\n self.stObjetivoContrato = wx.StaticText(self.panelNovoContrato, -1, 
u'Objetivo', pos=(130, 180), style=wx.ALIGN_LEFT)\n self.tcObjetivoContratado = wx.TextCtrl(self.panelNovoContrato, -1, pos=(130, 200), size=(350, -1), style=wx.ALIGN_LEFT)\n self.tcObjetivoContratado.SetMaxLength(300)\n self.tcObjetivoContratado.SetValue(self.contrato.objetivoContrato)\n\n self.stCnpjOriginal = wx.StaticText(self.panelNovoContrato, -1, u'CNPJ da UG do contrato orig.', pos=(500, 180), style=wx.ALIGN_LEFT)\n self.tcCnpjOriginal = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##.###.###/####-##\")\n self.tcCnpjOriginal.SetSize((140, -1))\n self.tcCnpjOriginal.SetPosition((500, 200))\n self.tcCnpjOriginal.SetValue(self.contrato.cnpjOriginal)\n\n self.stCodigoMoeda = wx.StaticText(self.panelNovoContrato, -1, u'Tipo de Moeda', pos=(10, 230), style=wx.ALIGN_LEFT)\n self.cbCodigoMoeda = wx.ComboBox(self.panelNovoContrato, -1, pos=(10, 250), size=(90, -1), choices=self.choicesCodigoMoeda, style=wx.CB_READONLY)\n self.cbCodigoMoeda.SetValue(self.contrato.codigoMoeda)\n\n self.stValorContrato = wx.StaticText(self.panelNovoContrato, -1, u'Valor', pos=(130, 230), style=wx.ALIGN_LEFT)\n self.tcValorContrato = wx.lib.masked.numctrl.NumCtrl(id=-1, parent=self.panelNovoContrato, pos=wx.Point(130, 250), style=0, value=0)\n self.tcValorContrato.SetFractionWidth(2)\n self.tcValorContrato.SetGroupChar(u\"#\")\n self.tcValorContrato.SetDecimalChar(u\",\")\n self.tcValorContrato.SetGroupChar(u\".\")\n self.tcValorContrato.SetAllowNegative(False)\n self.tcValorContrato.SetValue(float(self.contrato.valorContrato))\n self.liberaValor(None)\n\n self.stDataAssinaturaContrato = wx.StaticText(self.panelNovoContrato, -1, u'Data de Assinatura', pos=(290, 230), style=wx.ALIGN_LEFT)\n self.tcDataAssinaturaContrato = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataAssinaturaContrato.SetSize((80, -1))\n self.tcDataAssinaturaContrato.SetPosition((290, 250))\n self.tcDataAssinaturaContrato.SetValue(self.contrato.dataAssinaturaContrato)\n\n self.stDataVencimentoContrato = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(410, 230), style=wx.ALIGN_LEFT)\n self.tcDataVencimentoContrato = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoContrato.SetSize((80, -1))\n self.tcDataVencimentoContrato.SetPosition((410, 250))\n self.tcDataVencimentoContrato.SetValue(self.contrato.dataVencimentoContrato)\n\n self.stDataCompetencia = wx.StaticText(self.panelNovoContrato, -1, u'Competência (AAAAMM)', pos=(500, 230), style=wx.ALIGN_LEFT)\n self.tcDataCompetencia = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"####/##\")\n self.tcDataCompetencia.SetSize((80, -1))\n self.tcDataCompetencia.SetPosition((500, 250))\n self.tcDataCompetencia.SetValue(self.contrato.dataCompetencia) \n\n self.stNumeroProcessoLicitatorio = wx.StaticText(self.panelNovoContrato, -1, u'Número do Processo Licitatório', pos=(10, 280), style=wx.ALIGN_LEFT)\n self.tcNumeroProcessoLicitatorio = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 300), size=(200, -1), style=wx.ALIGN_LEFT)\n self.tcNumeroProcessoLicitatorio.SetMaxLength(16)\n self.tcNumeroProcessoLicitatorio.SetValue(self.contrato.numeroProcessoLicitatorio)\n\n self.stNumeroDiarioOficial = wx.StaticText(self.panelNovoContrato, -1, u'Número Diário Oficial', pos=(240, 280), style=wx.ALIGN_LEFT)\n self.tcNumeroDiarioOficial = wx.TextCtrl(self.panelNovoContrato, -1, pos=(240, 300), size=(100, -1), style=wx.ALIGN_LEFT)\n self.tcNumeroDiarioOficial.SetMaxLength(6)\n self.tcNumeroDiarioOficial.Bind(wx.EVT_CHAR, 
self.escapaChar)\n self.tcNumeroDiarioOficial.SetValue(self.contrato.numeroDiarioOficial)\n\n self.stDataPublicacaoContrato = wx.StaticText(self.panelNovoContrato, -1, u'Data de Publicação', pos=(380, 280), style=wx.ALIGN_LEFT)\n self.tcDataPublicacaoContrato = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataPublicacaoContrato.SetSize((80, -1))\n self.tcDataPublicacaoContrato.SetPosition((380, 300))\n self.tcDataPublicacaoContrato.SetValue(self.contrato.dataPublicacaoContrato)\n\n wx.StaticBox(self.panelNovoContrato, -1, pos=(1, 335), size=(660, 360))\n\n self.stNumeroCertidaoINSS = wx.StaticText(self.panelNovoContrato, -1, u'INSS', pos=(10, 350), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoINSS = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 370), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoINSS.SetMaxLength(60)\n self.tcNumeroCertidaoINSS.SetValue(self.contrato.numeroCertidaoINSS)\n\n self.stDataCertidaoINSS = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 350), style= wx.ALIGN_LEFT)\n self.tcDataCertidaoINSS = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataCertidaoINSS.SetSize((80, -1))\n self.tcDataCertidaoINSS.SetPosition((150, 370))\n self.tcDataCertidaoINSS.SetValue(self.contrato.dataCertidaoINSS)\n\n self.stDataValidadeINSS = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 350), style= wx.ALIGN_LEFT)\n self.tcDataValidadeINSS = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataValidadeINSS.SetSize((80, -1))\n self.tcDataValidadeINSS.SetPosition((280, 370))\n self.tcDataValidadeINSS.SetValue(self.contrato.dataValidadeINSS)\n\n self.stNumeroCertidaoFGTS = wx.StaticText(self.panelNovoContrato, -1, u'FGTS', pos=(10, 400), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFGTS = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 420), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFGTS.SetMaxLength(60)\n self.tcNumeroCertidaoFGTS.SetValue(self.contrato.numeroCertidaoFGTS)\n\n self.stDataCertidaoFGTS = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 400), style= wx.ALIGN_LEFT)\n self.tcDataCertidaoFGTS = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataCertidaoFGTS.SetSize((80, -1))\n self.tcDataCertidaoFGTS.SetPosition((150, 420))\n self.tcDataCertidaoFGTS.SetValue(self.contrato.dataCertidaoFGTS)\n\n self.stDataValidadeFGTS = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 400), style= wx.ALIGN_LEFT)\n self.tcDataValidadeFGTS = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataValidadeFGTS.SetSize((80, -1))\n self.tcDataValidadeFGTS.SetPosition((280, 420))\n self.tcDataValidadeFGTS.SetValue(self.contrato.dataValidadeFGTS)\n\n self.stNumeroCertidaoFazendaEstadual = wx.StaticText(self.panelNovoContrato, -1, u'Fazenda Estadual', pos=(10, 450), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaEstadual = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 470), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaEstadual.SetMaxLength(60)\n self.tcNumeroCertidaoFazendaEstadual.SetValue(self.contrato.numeroCertidaoFazendaEstadual)\n\n self.stDataEmissaoFazendaEstadual = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 450), style= wx.ALIGN_LEFT)\n self.tcDataEmissaoFazendaEstadual = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataEmissaoFazendaEstadual.SetSize((80, -1))\n 
self.tcDataEmissaoFazendaEstadual.SetPosition((150, 470))\n self.tcDataEmissaoFazendaEstadual.SetValue(self.contrato.dataCertidaoFazendaEstadual)\n\n self.stDataVencimentoFazendaEstadual = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 450), style= wx.ALIGN_LEFT)\n self.tcDataVencimentoFazendaEstadual = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoFazendaEstadual.SetSize((80, -1))\n self.tcDataVencimentoFazendaEstadual.SetPosition((280, 470))\n self.tcDataVencimentoFazendaEstadual.SetValue(self.contrato.dataValidadeFazendaEstadual)\n\n self.stNumeroCertidaoFazendaMunicipal = wx.StaticText(self.panelNovoContrato, -1, u'Fazenda Municipal', pos=(10, 500), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaMunicipal = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 520), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaMunicipal.SetMaxLength(60)\n self.tcNumeroCertidaoFazendaMunicipal.SetValue(self.contrato.numeroCertidaoFazendaMunicipal)\n\n self.stDataEmissaoFazendaMunicipal = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 500), style= wx.ALIGN_LEFT)\n self.tcDataEmissaoFazendaMunicipal = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataEmissaoFazendaMunicipal.SetSize((80, -1))\n self.tcDataEmissaoFazendaMunicipal.SetPosition((150, 520))\n self.tcDataEmissaoFazendaMunicipal.SetValue(self.contrato.dataCertidaoFazendaMunicipal)\n\n self.stDataVencimentoFazendaMunicipal = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 500), style= wx.ALIGN_LEFT)\n self.tcDataVencimentoFazendaMunicipal = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoFazendaMunicipal.SetSize((80, -1))\n self.tcDataVencimentoFazendaMunicipal.SetPosition((280, 520))\n self.tcDataVencimentoFazendaMunicipal.SetValue(self.contrato.dataValidadeFazendaMunicipal)\n\n self.stNumeroCertidaoFazendaFederal = wx.StaticText(self.panelNovoContrato, -1, u'Fazenda Federal', pos=(10, 550), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaFederal = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 570), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoFazendaFederal.SetMaxLength(60)\n self.tcNumeroCertidaoFazendaFederal.SetValue(self.contrato.numeroCertidaoFazendaFederal)\n\n self.stDataEmissaoFazendaFederal = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 550), style= wx.ALIGN_LEFT)\n self.tcDataEmissaoFazendaFederal = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataEmissaoFazendaFederal.SetSize((80, -1))\n self.tcDataEmissaoFazendaFederal.SetPosition((150, 570))\n self.tcDataEmissaoFazendaFederal.SetValue(self.contrato.dataCertidaoFazendaFederal)\n\n self.stDataVencimentoFazendaFederal = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 550), style= wx.ALIGN_LEFT)\n self.tcDataVencimentoFazendaFederal = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoFazendaFederal.SetSize((80, -1))\n self.tcDataVencimentoFazendaFederal.SetPosition((280, 570))\n self.tcDataVencimentoFazendaFederal.SetValue(self.contrato.dataValidadeFazendaFederal)\n\n self.stNumeroCertidaoCNDT = wx.StaticText(self.panelNovoContrato, -1, u'CNDT', pos=(10, 600), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoCNDT = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 620), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoCNDT.SetMaxLength(60)\n 
self.tcNumeroCertidaoCNDT.SetValue(self.contrato.numeroCertidaoCNDT)\n \n self.stDataEmissaoCNDT = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 600), style= wx.ALIGN_LEFT)\n self.tcDataEmissaoCNDT = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataEmissaoCNDT.SetSize((80, -1))\n self.tcDataEmissaoCNDT.SetPosition((150, 620))\n self.tcDataEmissaoCNDT.SetValue(self.contrato.dataCertidaoCNDT)\n\n self.stDataVencimentoCNDT = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 600), style= wx.ALIGN_LEFT)\n self.tcDataVencimentoCNDT = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoCNDT.SetSize((80, -1))\n self.tcDataVencimentoCNDT.SetPosition((280, 620))\n self.tcDataVencimentoCNDT.SetValue(self.contrato.dataValidadeCNDT)\n\n self.stNumeroCertidaoOutras = wx.StaticText(self.panelNovoContrato, -1, u'Outras', pos=(10, 650), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoOutras = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 670), size=(100, -1), style= wx.ALIGN_LEFT)\n self.tcNumeroCertidaoOutras.SetMaxLength(60)\n self.tcNumeroCertidaoOutras.SetValue(self.contrato.numeroCertidaoOutras)\n \n self.stDataEmissaoOutras = wx.StaticText(self.panelNovoContrato, -1, u'Data Emissão', pos=(150, 650), style= wx.ALIGN_LEFT)\n self.tcDataEmissaoOutras = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataEmissaoOutras.SetSize((80, -1))\n self.tcDataEmissaoOutras.SetPosition((150, 670))\n self.tcDataEmissaoOutras.SetValue(self.contrato.dataCertidaoOutras)\n\n self.stDataVencimentoOutras = wx.StaticText(self.panelNovoContrato, -1, u'Data de Venc.', pos=(280, 650), style= wx.ALIGN_LEFT)\n self.tcDataVencimentoOutras = masked.TextCtrl(self.panelNovoContrato, -1, mask=\"##/##/####\")\n self.tcDataVencimentoOutras.SetSize((80, -1))\n self.tcDataVencimentoOutras.SetPosition((280, 670))\n self.tcDataVencimentoOutras.SetValue(self.contrato.dataValidadeOutras)\n\n wx.StaticBox(self.panelNovoContrato, -1, pos=(1, 700), size=(660, 65))\n\n self.stNumeroContratoSuperior = wx.StaticText(self.panelNovoContrato, -1, u'Número do Contrato Superior', pos=(10, 715), style=wx.ALIGN_LEFT)\n self.tcNumeroContratoSuperior = wx.TextCtrl(self.panelNovoContrato, -1, pos=(10, 735), size=(200, -1), style=wx.ALIGN_LEFT)\n self.tcNumeroContratoSuperior.SetMaxLength(16)\n self.tcNumeroContratoSuperior.SetValue(self.contrato.numeroContratoAnterior)\n\n self.stTipoAditivo = wx.StaticText(self.panelNovoContrato, -1, u'Tipo do Aditivo', pos=(240, 715), style=wx.ALIGN_LEFT)\n self.tcTipoAditivo = wx.ComboBox(self.panelNovoContrato, -1, pos=(240, 735), size=(180, -1), choices=self.tipoAditivo, style=wx.CB_READONLY)\n self.tcTipoAditivo.SetValue(self.contrato.tipoDoAditivo)\n\n self.btnEditar = wx.Button(self.panelNovoContrato, -1, \"Alterar\", pos=(230, 790),size=(-1,18))\n self.btnCancelar = wx.Button(self.panelNovoContrato, -1, \"Cancelar\", pos=(350, 790),size=(-1,18))\n\n\n self.windowEditaContrato.Centre()\n self.windowEditaContrato.Show()\n\n #Bind\n self.btnCancelar.Bind(wx.EVT_BUTTON, self.quitEditarContrato)\n self.btnEditar.Bind(wx.EVT_BUTTON, lambda event: self.editarContrato(event, self.contrato.id))\n self.windowEditaContrato.Bind(wx.EVT_CLOSE, self.quitEditarContrato)\n #Fim Bind\n\n def quitEditarContrato(self, event):\n\n self.toolBarControler(True, True, True, True)\n self.windowEditaContrato.Destroy()\n\n def editarContrato(self, event, id):\n\n if self.valida():\n\n 
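# Copy each edited form field back onto the contract record before committing (legacy Python 2 code, hence unicode()).\n 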
self.contrato.numeroContrato = unicode(self.tcNumeroContratado.GetValue())\n self.contrato.valorContrato = unicode(self.tcValorContrato.GetValue())\n self.contrato.dataAssinaturaContrato = unicode(self.tcDataAssinaturaContrato.GetValue())\n self.contrato.objetivoContrato = unicode(self.tcObjetivoContratado.GetValue())\n self.contrato.numeroProcessoLicitatorio = unicode(self.tcNumeroProcessoLicitatorio.GetValue())\n self.contrato.codigoMoeda = unicode(self.cbCodigoMoeda.GetValue())\n self.contrato.tipoJuridicoContratado = unicode(self.cbTipoJuridicoContratado.GetValue())\n self.contrato.cicContratado = unicode(self.tcCicContratado.GetValue())\n self.contrato.nomeContratado = unicode(self.tcNomeContratado.GetValue())\n self.contrato.dataVencimentoContrato = unicode(self.tcDataVencimentoContrato.GetValue())\n self.contrato.numeroDiarioOficial = unicode(self.tcNumeroDiarioOficial.GetValue())\n self.contrato.dataPublicacaoContrato = unicode(self.tcDataPublicacaoContrato.GetValue())\n self.contrato.recebeValor = unicode(self.cbRecebeValor.GetValue())\n self.contrato.numeroCertidaoINSS = unicode(self.tcNumeroCertidaoINSS.GetValue())\n self.contrato.dataCertidaoINSS = unicode(self.tcDataCertidaoINSS.GetValue())\n self.contrato.dataValidadeINSS = unicode(self.tcDataValidadeINSS.GetValue())\n self.contrato.numeroCertidaoFGTS = unicode(self.tcNumeroCertidaoFGTS.GetValue())\n self.contrato.dataCertidaoFGTS = unicode(self.tcDataCertidaoFGTS.GetValue())\n self.contrato.dataValidadeFGTS = unicode(self.tcDataValidadeFGTS.GetValue())\n self.contrato.numeroCertidaoFazendaEstadual = unicode(self.tcNumeroCertidaoFazendaEstadual.GetValue())\n self.contrato.dataCertidaoFazendaEstadual = unicode(self.tcDataEmissaoFazendaEstadual.GetValue())\n self.contrato.dataValidadeFazendaEstadual = unicode(self.tcDataVencimentoFazendaEstadual.GetValue())\n self.contrato.numeroCertidaoFazendaMunicipal = unicode(self.tcNumeroCertidaoFazendaMunicipal.GetValue())\n self.contrato.dataCertidaoFazendaMunicipal = unicode(self.tcDataEmissaoFazendaMunicipal.GetValue())\n self.contrato.dataValidadeFazendaMunicipal = unicode(self.tcDataVencimentoFazendaMunicipal.GetValue())\n self.contrato.numeroCertidaoFazendaFederal = unicode(self.tcNumeroCertidaoFazendaFederal.GetValue())\n self.contrato.dataCertidaoFazendaFederal = unicode(self.tcDataEmissaoFazendaFederal.GetValue())\n self.contrato.dataValidadeFazendaFederal = unicode(self.tcDataVencimentoFazendaFederal.GetValue())\n self.contrato.numeroCertidaoCNDT = unicode(self.tcNumeroCertidaoCNDT.GetValue())\n self.contrato.dataCertidaoCNDT = unicode(self.tcDataEmissaoCNDT.GetValue())\n self.contrato.dataValidadeCNDT = unicode(self.tcDataVencimentoCNDT.GetValue())\n self.contrato.numeroCertidaoOutras = unicode(self.tcNumeroCertidaoOutras.GetValue())\n self.contrato.dataCertidaoOutras = unicode(self.tcDataEmissaoOutras.GetValue())\n self.contrato.dataValidadeOutras = unicode(self.tcDataVencimentoOutras.GetValue())\n self.contrato.tipoContrato = unicode(self.cbTipoContrato.GetValue())\n self.contrato.numeroContratoAnterior = unicode(self.tcNumeroContratoSuperior.GetValue())\n self.contrato.tipoDoAditivo = unicode(self.tcTipoAditivo.GetValue())\n self.contrato.cnpjOriginal = unicode(self.tcCnpjOriginal.GetValue())\n self.contrato.dataCompetencia = unicode(self.tcDataCompetencia.GetValue())\n self.contrato.competencia = unicode(self.cbCompetencia.GetValue())\n\n session.commit()\n self.message = wx.MessageDialog(None, u'Contrato foi alterado com sucesso!', 'Info', wx.OK)\n 
self.message.ShowModal()\n self.insereContratoListCtrl(None)\n self.contrato = None\n self.windowEditaContrato.Close()\n\n def excluiContrato(self, event, idContrato):\n\n if idContrato is None:\n self.message = wx.MessageDialog(None, u'Selecione um item na lista!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n remove_dial = wx.MessageDialog(None, u'Tem certeza que deseja excluir este contrato?', 'Remover - Contrato', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)\n ret = remove_dial.ShowModal()\n if ret == wx.ID_YES:\n self.contrato = Contrato.query.filter_by(id=idContrato).first()\n self.contrato.delete()\n session.commit()\n self.insereContratoListCtrl(None)\n self.anulaIdItemSelecionado(None)\n self.message = wx.MessageDialog(None, u'Contrato excluído com sucesso!', 'Info', wx.OK)\n self.message.ShowModal()\n else:\n pass\n\n def editaInsereSiglaContrato(self, event):\n\n self.message = wx.MessageDialog(None, u'O Número do contrato é alterado quando se muda o Tipo de contrato!', 'Info', wx.OK)\n self.message.ShowModal()\n self.tcNumeroContratado.SetValue(\"\")\n self.tcNumeroContratado.SetValue(unicode(self.siglasContratos[self.cbTipoContrato.GetValue()]))\n\n def insereSiglaContrato(self, event):\n\n self.tcNumeroContratado.SetValue(\"\")\n self.tcNumeroContratado.SetValue(unicode(self.siglasContratos[self.cbTipoContrato.GetValue()]))\n\n def validateDate(self, date, field):\n\n if date == \" / / \":\n self.message = wx.MessageDialog(None, u'O campo '+field+' deve ser preenchido!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if date[0:2] == ' ':\n self.message = wx.MessageDialog(None, u'Preencha o dia no campo '+field, 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if date[3:5] == ' ':\n self.message = wx.MessageDialog(None, u'Preencha o mês no campo '+field, 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if date[6:] == ' ':\n self.message = wx.MessageDialog(None, u'Preencha o ano no campo '+field, 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if int(date[0:2]) < 1 or int(date[0:2]) > 31:\n self.message = wx.MessageDialog(None, u'No campo '+field+u' o dia deve estar entre 1 e 31!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if int(date[3:5]) < 1 or int(date[3:5]) > 12:\n self.message = wx.MessageDialog(None, u'No campo '+field+u' o mês deve estar entre 1 e 12!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if int(date[6:]) < 1900:\n self.message = wx.MessageDialog(None, u'No campo '+field+u' o ano deve estar no formato de quatro dígitos!E ser maior que 1900!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n if int(date[3:5]) == 2:\n if int(date[0:2]) > 29:\n self.message = wx.MessageDialog(None, u'Campo: '+field+u'\\nNo mês de Fevereiro nunca tem um dia maior que 29!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n else:\n try:\n datetime.date(int(date[6:10]), int(date[3:5]), int(date[0:2]))\n except ValueError:\n self.message = wx.MessageDialog(None, u'Campo: '+field+u'\\nEste ano Fevereiro não possui o dia 29!', 'Info', wx.OK)\n self.message.ShowModal()\n return 0\n return 1\n\n def valida(self):\n\n if self.cbCompetencia.GetSelection() == -1:\n self.message = wx.MessageDialog(None, u'Selecione uma opção no campo Competência', 'Info', wx.OK)\n self.cbCompetencia.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.cbTipoContrato.GetSelection() == -1:\n self.message = wx.MessageDialog(None, u'Selecione uma opção no campo Tipo ', 'Info', wx.OK)\n self.cbTipoContrato.SetFocus()\n 
self.message.ShowModal()\n return 0\n\n if self.cbRecebeValor.GetSelection() == -1:\n self.message = wx.MessageDialog(None, u'Selecione uma opção no campo Recebe Valor', 'Info', wx.OK)\n self.cbRecebeValor.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.cbTipoJuridicoContratado.GetSelection() == -1:\n self.message = wx.MessageDialog(None, u'Selecione uma opção no campo Tipo Pessoa', 'Info', wx.OK)\n self.cbTipoJuridicoContratado.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.cbTipoJuridicoContratado.GetSelection() == 0:\n if self.tcCicContratado.GetValue() == \" . . - \":\n self.message = wx.MessageDialog(None, u'O campo CNPJ ou CPF deve ser preenchido', 'Info', wx.OK)\n self.tcCicContratado.SelectAll()\n self.tcCicContratado.SetFocus()\n self.message.ShowModal()\n return 0 \n\n if not burocracia.CPF(self.tcCicContratado.GetValue()).isValid():\n self.message = wx.MessageDialog(None, u'CPF inválido!', 'Info', wx.OK)\n self.tcCicContratado.SelectAll()\n self.tcCicContratado.SetFocus()\n self.message.ShowModal()\n return 0\n\n if not self.tcCnpjOriginal.GetValue() == \" . . / - \":\n\n if not burocracia.CNPJ(self.tcCnpjOriginal.GetValue()).isValid():\n self.message = wx.MessageDialog(None, u'CNPJ da UG do contrato original inválido!', 'Info', wx.OK)\n self.tcCnpjOriginal.SelectAll()\n self.tcCnpjOriginal.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.cbTipoJuridicoContratado.GetSelection() == 1:\n if self.tcCicContratado.GetValue() == \" . . / - \":\n self.message = wx.MessageDialog(None, u'O campo CNPJ ou CPF deve ser preenchido', 'Info', wx.OK)\n self.tcCicContratado.SelectAll()\n self.tcCicContratado.SetFocus()\n self.message.ShowModal()\n return 0\n\n if not burocracia.CNPJ(self.tcCicContratado.GetValue()).isValid():\n self.message = wx.MessageDialog(None, u'CNPJ inválido!', 'Info', wx.OK)\n self.tcCicContratado.SelectAll()\n self.tcCicContratado.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.cbTipoJuridicoContratado.GetSelection() == 2:\n if self.tcCicContratado.GetValue() == '':\n self.message = wx.MessageDialog(None, u'Digite o identificador no campo CNPJ ou CPF', 'Info', wx.OK)\n self.tcCicContratado.SelectAll()\n self.tcCicContratado.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.tcNomeContratado.GetValue() == \"\":\n self.message = wx.MessageDialog(None, u'O campo Nome deve ser preenchido', 'Info', wx.OK)\n self.tcNomeContratado.SelectAll()\n self.tcNomeContratado.SetFocus()\n self.message.ShowModal()\n return 0\n\n contrato = Contrato.query.filter_by(numeroContrato=self.tcNumeroContratado.GetValue()).first()\n\n if contrato != None:\n if (unicode(contrato.numeroContrato.upper()) == unicode(self.tcNumeroContratado.GetValue().upper())) and (contrato.id == int(self.tcId.GetValue())):\n pass\n else:\n self.message = wx.MessageDialog(None, u'Já existe um contrato com a numeração: '+self.tcNumeroContratado.GetValue()+u'!', 'Info', wx.OK)\n self.tcNumeroContratado.SelectAll()\n self.tcNumeroContratado.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.tcNumeroContratado.GetValue() == \"\":\n self.message = wx.MessageDialog(None, u'O campo Número deve ser preenchido', 'Info', wx.OK)\n self.tcNumeroContratado.SelectAll()\n self.tcNumeroContratado.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.tcObjetivoContratado.GetValue() == \"\":\n self.message = wx.MessageDialog(None, u'O campo Objetivo deve ser preenchido', 'Info', wx.OK)\n self.tcObjetivoContratado.SelectAll()\n 
self.tcObjetivoContratado.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.cbRecebeValor.GetValue() == u'S':\n\n if self.cbCodigoMoeda.GetSelection() == -1:\n self.message = wx.MessageDialog(None, u'Selecione uma opção no campo Tipo de Moeda', 'Info', wx.OK)\n self.cbCodigoMoeda.SetFocus()\n self.message.ShowModal()\n return 0\n\n if self.tcValorContrato.GetValue() == 0.0:\n self.message = wx.MessageDialog(None, u'O campo Valor não pode ter o valor igual a 0', 'Info', wx.OK)\n self.tcValorContrato.SelectAll()\n self.tcValorContrato.SetFocus()\n self.message.ShowModal()\n return 0\n\n if not self.validateDate(self.tcDataAssinaturaContrato.GetValue(), u\"Data de Assinatura\"):\n self.tcDataAssinaturaContrato.SelectAll()\n self.tcDataAssinaturaContrato.SetFocus()\n return 0\n\n if not self.validateDate(self.tcDataVencimentoContrato.GetValue(), u\"Data de Venc.\"):\n self.tcDataVencimentoContrato.SelectAll()\n self.tcDataVencimentoContrato.SetFocus()\n return 0\n \n if self.tcNumeroDiarioOficial.GetValue() == \"\":\n self.message = wx.MessageDialog(None, u'O campo Número Diário Oficial deve ser preenchido', 'Info', wx.OK)\n self.tcNumeroDiarioOficial.SelectAll()\n self.tcNumeroDiarioOficial.SetFocus()\n self.message.ShowModal()\n return 0\n\n #if not self.validateDate(self.tcDataPublicacaoContrato.GetValue(), u\"Data de Publicação\"):\n # self.tcDataPublicacaoContrato.SelectAll()\n # self.tcDataPublicacaoContrato.SetFocus()\n # return 0\n\n if self.tcNumeroCertidaoINSS.GetValue() != \"\":\n if not self.validateDate(self.tcDataCertidaoINSS.GetValue(), u\"Data Emissão (INSS)\"):\n self.tcDataCertidaoINSS.SelectAll()\n self.tcDataCertidaoINSS.SetFocus()\n return 0\n\n if not self.validateDate(self.tcDataValidadeINSS.GetValue(), u\"Data de Venc. (INSS)\"):\n self.tcDataValidadeINSS.SelectAll()\n self.tcDataValidadeINSS.SetFocus()\n return 0\n\n if self.tcNumeroCertidaoFGTS.GetValue() != \"\":\n if not self.validateDate(self.tcDataCertidaoFGTS.GetValue(), u\"Data Emissão (FGTS)\"):\n self.tcDataCertidaoFGTS.SelectAll()\n self.tcDataCertidaoFGTS.SetFocus()\n return 0\n\n if not self.validateDate(self.tcDataValidadeFGTS.GetValue(), u\"Data de Venc. (FGTS)\"):\n self.tcDataValidadeFGTS.SelectAll()\n self.tcDataValidadeFGTS.SetFocus()\n return 0\n\n if self.tcNumeroCertidaoFazendaEstadual.GetValue() != \"\":\n if not self.validateDate(self.tcDataEmissaoFazendaEstadual.GetValue(), u\"Data Emissão (Fazenda Estadual)\"):\n self.tcDataEmissaoFazendaEstadual.SelectAll()\n self.tcDataEmissaoFazendaEstadual.SetFocus()\n return 0\n\n if not self.validateDate(self.tcDataVencimentoFazendaEstadual.GetValue(), u\"Data de Venc. (Fazenda Estadual)\"):\n self.tcDataVencimentoFazendaEstadual.SelectAll()\n self.tcDataVencimentoFazendaEstadual.SetFocus()\n return 0\n\n if self.tcNumeroCertidaoFazendaFederal.GetValue() != \"\":\n if not self.validateDate(self.tcDataEmissaoFazendaFederal.GetValue(), u\"Data Emissão (Fazenda Federal)\"):\n self.tcDataEmissaoFazendaFederal.SelectAll()\n self.tcDataEmissaoFazendaFederal.SetFocus()\n return 0\n\n if not self.validateDate(self.tcDataVencimentoFazendaFederal.GetValue(), u\"Data de Venc. 
(Fazenda Federal)\"):\n self.tcDataVencimentoFazendaFederal.SelectAll()\n self.tcDataVencimentoFazendaFederal.SetFocus()\n return 0\n\n if self.tcNumeroCertidaoFazendaMunicipal.GetValue() != \"\":\n\n if not self.validateDate(self.tcDataEmissaoFazendaMunicipal.GetValue(), u\"Data Emissão (Fazenda Municipal)\"):\n self.tcDataEmissaoFazendaMunicipal.SelectAll()\n self.tcDataEmissaoFazendaMunicipal.SetFocus()\n return 0\n\n if not self.validateDate(self.tcDataVencimentoFazendaMunicipal.GetValue(), u\"Data de Venc. (Fazenda Municipal)\"):\n self.tcDataVencimentoFazendaMunicipal.SelectAll()\n self.tcDataVencimentoFazendaMunicipal.SetFocus()\n return 0\n\n if self.tcNumeroCertidaoCNDT.GetValue() != \"\":\n\n if not self.validateDate(self.tcDataEmissaoCNDT.GetValue(), u\"Data Emissão (CNDT)\"):\n self.tcDataEmissaoOutras.SelectAll()\n self.tcDataEmissaoOutras.SetFocus()\n return 0\n\n if not self.validateDate(self.tcDataVencimentoCNDT.GetValue(), u\"Data de Venc. (CNDT)\"):\n self.tcDataVencimentoOutras.SelectAll()\n self.tcDataVencimentoOutras.SetFocus()\n return 0\n\n if self.tcNumeroCertidaoOutras.GetValue() != \"\":\n\n if not self.validateDate(self.tcDataEmissaoOutras.GetValue(), u\"Data Emissão (Outras)\"):\n self.tcDataEmissaoOutras.SelectAll()\n self.tcDataEmissaoOutras.SetFocus()\n return 0\n\n if not self.validateDate(self.tcDataVencimentoOutras.GetValue(), u\"Data de Venc. (Outras)\"):\n self.tcDataVencimentoOutras.SelectAll()\n self.tcDataVencimentoOutras.SetFocus()\n return 0\n\n return 1\n\n def insereContratoListCtrl(self, event):\n\n self.contratoListCtrl.DeleteAllItems()\n\n if self.cbCompetenciaForView.GetSelection() != -1:\n \n contratos = Contrato.query.filter_by(competencia=self.cbCompetenciaForView.GetValue()).all()\n for contrato in contratos:\n index = self.contratoListCtrl.InsertStringItem(sys.maxint, unicode(contrato.numeroContrato))\n self.contratoListCtrl.SetStringItem(index, 1, contrato.nomeContratado)\n self.contratoListCtrl.SetStringItem(index, 2, contrato.objetivoContrato)\n self.contratoListCtrl.SetStringItem(index, 3, unicode(contrato.id))\n\n def transformaData(self, data):\n\n if data == \" / / \":\n return '00000000'\n else:\n return data[6:]+data[3:5]+data[0:2]\n\n def transformaAAAAMM(self, data):\n\n if data == \" / \":\n return '000000'\n else:\n return data[0:4]+data[5:7]\n\n def retiraCaracteresCpfCnpj(self, cic):\n\n cpf = \"\"\n for x in cic:\n if x == u'.':\n pass\n elif x == u'-':\n pass\n elif x == u'/':\n pass\n else:\n cpf = cpf+x\n return cpf\n\n def geraArquivoWindow(self, event):\n\n self.toolBarControler(False, False, False, False)\n\n self.windowGeraArquivo = wx.MiniFrame(parent=self, id=wx.ID_ANY, size=(680, 470), pos=(300, 170), title=u\"Gerar Arquivo de Contrato\", style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN)\n self.panelGeraArquivo = wx.Panel(self.windowGeraArquivo, wx.ID_ANY)\n\n wx.StaticBox(self.panelGeraArquivo, -1, pos=(0, 0), size=(660, 60))\n\n self.tipoContrato = {u'Termo de Contrato' : '1', u'Termo Aditivo ao Contrato' : '2', u'Termo de Re-Ratificação de Contrato' : '3', u'Termo de Distrato de Contrato' : '4',\n u'Termo de Rescisão de Contrato' : '5', u'Termo Concessão de Uso' : '6', u'Termo de Aditivo de Concessão de Uso' : '7', u'Termo de Permissão de Uso' : '8',\n u'Termo Aditivo de Permissão de Uso' : '9', u'Termo de Autorização de Uso' : '10', u'Termo Aditivo de Autorização de Uso' : '11', u'Termo de Cessão' : '12',\n u'Termo Aditivo a Cessão' : '13', u'Termo de Compromisso' : '14', u'Termo Aditivo 
ao Compromisso' : '15', u'Termo de Direito Real de Uso' : '16',\n u'Termo Aditivo ao Direito Real de Uso' : '17', u'Termo de Doação' : '18', u'Carta Contrato' : '19', u'Ordem de Serviços' : '20', \n u'Termo Aditivo a Ordem de Serviços' : '21', u'Termo de Revogação de Autorização de Uso' : '22', u'Termo de Adesão ao Contrato' : '23', \n u'Termo de Outorga' : '24', u'Termo Aditivo de Outorga' : '25', u'Termo de Ex-Ofício' : '26', u'Termo Aditivo de Carta Contrato' : '27', \n u'Termo de Cooperação Técnica' : '28', u'Termo Aditivo de Cooperação Técnica' : '29', u'Termo de Ordem de Serviços' : '30', \n u'Termo de Recebimento de Auxílio Aluguel' : '31', u'Termo de Recebimento de Cheque Moradia' : '32', u'Termo de Recebimento de Indenização' : '33', \n u'Termo de Quitação de Contrato' : '34', u'Protocolo de Intenções' : '35', u'Termo Aditivo de Protocolo de Intenções' : '36', \n u'Termo Aditivo de Doação' : '37', u'Apostila de Retificação de Contrato' : '38', u'Termo de Contrato de Gestão' : '39', \n u'Termo Aditivo de Contrato de Gestão' : '40', u'Termo de Rescisão de Cessão' : '41', u'Termo de Apostilamento de Contrato' : '42', \n u'Apólice de contratação de serviços de seguro': '43', u'Termo Aditivo de Apólice de contratação de serviços de seguro' : '44'}\n\n\n choicesCompetencias = self.choicesCompetencias\n choicesCompetencias.append(u'Todos')\n self.stGeraArquivoCompetencia = wx.StaticText(self.panelGeraArquivo, -1, u'Competência', pos=(10, 10), style=wx.ALIGN_LEFT)\n self.cbGeraArquivoCompetencia = wx.ComboBox(self.panelGeraArquivo, -1, pos=(10, 30), size=(250, -1), choices=choicesCompetencias, style=wx.CB_READONLY)\n self.cbGeraArquivoCompetencia.Bind(wx.EVT_COMBOBOX, self.insereContratoPorCompetencia)\n\n self.competenciaAtual = None\n self.itensGeraArquivoListCtrl = []\n self.itensParaArquivosListCtrl = []\n\n wx.StaticText(self.panelGeraArquivo, -1, u'Inserir:', pos=(10, 70))\n self.contratosGeraArquivoListCtrl = wx.ListCtrl(self.panelGeraArquivo, wx.ID_ANY, pos=(10, 90), size=(250, 300), style=wx.LC_REPORT)\n self.contratosGeraArquivoListCtrl.InsertColumn(0, u'Número do Contrato', width=130)\n self.contratosGeraArquivoListCtrl.InsertColumn(1, u'Nome Contratado', width=120)\n self.contratosGeraArquivoListCtrl.InsertColumn(2, u'', width=0)\n self.contratosGeraArquivoListCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.selecionaItensContratosGeraArquivos)\n\n self.btnIncluiContratoGeraArquivo = wx.Button(self.panelGeraArquivo, -1, u\">>\", pos=(290, 200))\n self.btnIncluiContratoGeraArquivo.Bind(wx.EVT_BUTTON, self.insereGeraArquivo)\n self.btnRemoveContratoGeraArquivo = wx.Button(self.panelGeraArquivo, -1, u\"<<\", pos=(290, 250))\n self.btnRemoveContratoGeraArquivo.Bind(wx.EVT_BUTTON, self.removeGeraArquivo)\n\n wx.StaticText(self.panelGeraArquivo, -1, u'Gerar Arquivo Com:', pos=(400, 70))\n self.contratosParaArquivoListCtrl = wx.ListCtrl(self.panelGeraArquivo, wx.ID_ANY, pos=(400, 90), size=(250, 300), style=wx.LC_REPORT)\n self.contratosParaArquivoListCtrl.InsertColumn(0, u'Número do Contrato', width=130)\n self.contratosParaArquivoListCtrl.InsertColumn(1, u'Nome Contratado', width=120)\n self.contratosParaArquivoListCtrl.InsertColumn(2, u'', width=0)\n self.contratosParaArquivoListCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.selecionaItensContratosParaArquivo)\n\n self.btnGerarArquivo = wx.Button(self.panelGeraArquivo, -1, \"Gerar Arquivo\", pos=(300, 400))\n self.btnGerarArquivo.Bind(wx.EVT_BUTTON, self.geraArquivoDialog)\n\n self.windowGeraArquivo.Bind(wx.EVT_CLOSE, 
self.quitGeraArquivo)\n\n self.windowGeraArquivo.Centre()\n self.windowGeraArquivo.Show()\n\n def quitGeraArquivo(self, event):\n\n self.toolBarControler(True, True, True, True)\n self.windowGeraArquivo.Destroy()\n\n def geraArquivoDialog(self, event):\n\n if self.contratosParaArquivoListCtrl.GetItemCount() == 0:\n\n self.message = wx.MessageDialog(None, u'Selecione os Contratos para gerar o arquivo!!', u'Info', wx.OK)\n self.message.ShowModal()\n return 0\n\n dlg = wx.FileDialog(self, message=\"Salvar \", defaultDir=\"\", defaultFile=\"CONTRATO\", wildcard=\"Arquivo de Remessa (*.REM)|*.REM\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n\n self.path = dlg.GetPath()\n if os.path.exists(self.path):\n\n remove_dial = wx.MessageDialog(None, u'Já existe um arquivo '+dlg.GetFilename()+u\".\\n Deseja substituí-lo?\", 'Sair', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)\n ret = remove_dial.ShowModal()\n if ret == wx.ID_YES:\n\n if self.geraArquivo():\n self.message = wx.MessageDialog(None, u'Arquivo de contratos gerados com sucesso!', 'Info', wx.OK)\n self.message.ShowModal()\n \n else:\n self.message = wx.MessageDialog(None, u'Houve um erro na geração do arquivo!\\nVerifique se você tem permissão de escrita ou se o arquivo já se encontra aberto!', 'Error', wx.OK)\n self.message.ShowModal()\n \n else:\n if self.geraArquivo():\n self.message = wx.MessageDialog(None, u'Arquivo de contratos gerados com sucesso!', 'Info', wx.OK)\n self.message.ShowModal()\n \n else:\n self.message = wx.MessageDialog(None, u'Houve um erro na geração do arquivo!\\nVerifique se você tem permissão de escrita ou se o arquivo já se encontra aberto!', 'Error', wx.OK)\n self.message.ShowModal()\n \n def geraArquivo(self):\n\n f = codecs.open(self.path, \"w\", \"utf-8\")\n\n for x in range(self.contratosParaArquivoListCtrl.GetItemCount()):\n\n try:\n\n idContrato = int(self.contratosParaArquivoListCtrl.GetItem(x, 2).GetText())\n contrato = Contrato.query.filter_by(id=idContrato).first()\n\n f.write(unicode(contrato.numeroContrato.ljust(16).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n partes = contrato.valorContrato.split('.')\n \n if len(partes[1])> 1:\n f.write(unicode((contrato.valorContrato).zfill(16).replace(\".\", \",\")))\n else:\n f.write(unicode((contrato.valorContrato+'0').zfill(16).replace(\".\", \",\"))) \n \n f.write(unicode(self.transformaData(contrato.dataAssinaturaContrato)))\n f.write(unicode(contrato.objetivoContrato.ljust(300).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n f.write(unicode(contrato.numeroProcessoLicitatorio.ljust(18).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n f.write(unicode(self.transformaCodigoMoeda(contrato.codigoMoeda).zfill(3)))\n f.write(unicode(self.transformaTipoJuridico(contrato.tipoJuridicoContratado).zfill(1)))\n f.write(unicode(self.retiraCaracteresCpfCnpj(contrato.cicContratado).zfill(14)))\n f.write(unicode(contrato.nomeContratado.ljust(50).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n f.write(unicode(self.transformaData(contrato.dataVencimentoContrato)))\n f.write(unicode(contrato.numeroDiarioOficial.zfill(6)))\n f.write(unicode(self.transformaData(contrato.dataPublicacaoContrato)))\n f.write(unicode(contrato.recebeValor.zfill(1)))\n\n f.write(unicode(contrato.numeroCertidaoINSS.ljust(60).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n f.write(unicode(self.transformaData(contrato.dataCertidaoINSS)))\n f.write(unicode(self.transformaData(contrato.dataValidadeINSS)))\n\n f.write(unicode(contrato.numeroCertidaoFGTS.ljust(60).replace(\"'\", 
\"\").replace(\"\\\"\", \"\")))\n f.write(unicode(self.transformaData(contrato.dataCertidaoFGTS)))\n f.write(unicode(self.transformaData(contrato.dataValidadeFGTS)))\n\n f.write(unicode(contrato.numeroCertidaoFazendaEstadual.ljust(60).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n f.write(unicode(self.transformaData(contrato.dataCertidaoFazendaEstadual)))\n f.write(unicode(self.transformaData(contrato.dataValidadeFazendaEstadual)))\n\n f.write(unicode(contrato.numeroCertidaoFazendaMunicipal.ljust(60).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n f.write(unicode(self.transformaData(contrato.dataCertidaoFazendaMunicipal)))\n f.write(unicode(self.transformaData(contrato.dataValidadeFazendaMunicipal)))\n\n f.write(unicode(contrato.numeroCertidaoFazendaFederal.ljust(60).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n f.write(unicode(self.transformaData(contrato.dataCertidaoFazendaFederal)))\n f.write(unicode(self.transformaData(contrato.dataValidadeFazendaFederal)))\n\n f.write(unicode(contrato.numeroCertidaoCNDT.ljust(60).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n f.write(unicode(self.transformaData(contrato.dataCertidaoCNDT)))\n f.write(unicode(self.transformaData(contrato.dataValidadeCNDT)))\n\n f.write(unicode(contrato.numeroCertidaoOutras.ljust(60).replace(\"'\", \"\").replace(\"\\\"\", \"\")))\n f.write(unicode(self.transformaData(contrato.dataCertidaoOutras)))\n f.write(unicode(self.transformaData(contrato.dataValidadeOutras)))\n\n f.write(unicode(self.transformaTipoContrato(contrato.tipoContrato).zfill(2)))\n\n f.write(unicode(contrato.numeroContratoAnterior).ljust(16))\n\n f.write(unicode(self.transformaTipoAditivo(contrato.tipoDoAditivo)))\n\n f.write(unicode(self.retiraCaracteresCpfCnpj(contrato.cnpjOriginal).zfill(14)))\n\n f.write(unicode(self.transformaAAAAMM(contrato.dataCompetencia).zfill(6)))\n\n f.write(u\"\\n\")\n except:\n return 0\n\n f.close()\n return 1\n\n def transformaTipoAditivo(self, tipo):\n \n if not tipo:\n return 0\n\n if tipo == u'Acréscimo de valor':\n return 1\n \n if tipo == u'Decréscimo de valor':\n return 2\n\n if tipo == u'Não houve alteração de valor':\n return 3\n\n def transformaTipoContrato(self, tipo):\n \n return self.tipoContrato[tipo]\n\n def transformaTipoJuridico(self, tipo):\n if tipo == u'Física':\n return '1'\n elif tipo == u'Jurídica':\n return '2'\n else:\n return '3'\n\n def transformaCodigoMoeda(self, moeda):\n if moeda == u\"Real\":\n return \"1\"\n elif moeda == u\"Dolar\":\n return \"3\"\n else:\n return \"9\"\n\n def insereGeraArquivo(self, event):\n\n if not self.itensGeraArquivoListCtrl:\n self.message = wx.MessageDialog(None, u'Selecione os contratos a serem inseridos!', 'Info', wx.OK)\n self.message.ShowModal()\n else:\n\n for item in self.itensGeraArquivoListCtrl:\n\n index = self.contratosParaArquivoListCtrl.InsertStringItem(sys.maxint, unicode(self.contratosGeraArquivoListCtrl.GetItem(item, 0).GetText()))\n self.contratosParaArquivoListCtrl.SetStringItem(index, 1, unicode(self.contratosGeraArquivoListCtrl.GetItem(item, 1).GetText()))\n self.contratosParaArquivoListCtrl.SetStringItem(index, 2, unicode(self.contratosGeraArquivoListCtrl.GetItem(item, 2).GetText()))\n\n for item in reversed(self.itensGeraArquivoListCtrl):\n self.contratosGeraArquivoListCtrl.DeleteItem(item)\n\n self.itensGeraArquivoListCtrl = []\n\n def removeGeraArquivo(self, event):\n\n if not self.itensParaArquivosListCtrl:\n self.message = wx.MessageDialog(None, u'Selecione os contratos a serem removidos!', 'Info', wx.OK)\n self.message.ShowModal()\n 
else:\n\n for item in self.itensParaArquivosListCtrl:\n\n index = self.contratosGeraArquivoListCtrl.InsertStringItem(sys.maxint, unicode(self.contratosParaArquivoListCtrl.GetItem(item, 0).GetText()))\n self.contratosGeraArquivoListCtrl.SetStringItem(index, 1, unicode(self.contratosParaArquivoListCtrl.GetItem(item, 1).GetText()))\n self.contratosGeraArquivoListCtrl.SetStringItem(index, 2, unicode(self.contratosParaArquivoListCtrl.GetItem(item, 2).GetText()))\n\n for item in reversed(self.itensParaArquivosListCtrl):\n self.contratosParaArquivoListCtrl.DeleteItem(item)\n\n self.itensParaArquivosListCtrl = []\n\n def selecionaItensContratosGeraArquivos(self, event):\n\n item = self.contratosGeraArquivoListCtrl.GetFirstSelected()\n self.itensGeraArquivoListCtrl = []\n while item != -1:\n self.itensGeraArquivoListCtrl.append(item)\n item = self.contratosGeraArquivoListCtrl.GetNextSelected(item)\n\n def selecionaItensContratosParaArquivo(self, event):\n\n item = self.contratosParaArquivoListCtrl.GetFirstSelected()\n self.itensParaArquivosListCtrl = []\n while item != -1:\n self.itensParaArquivosListCtrl.append(item)\n item = self.contratosParaArquivoListCtrl.GetNextSelected(item)\n\n def insereContratoPorCompetencia(self, event):\n\n contratos = []\n if self.competenciaAtual == unicode(self.cbGeraArquivoCompetencia.GetValue()):\n return 0\n\n elif self.cbGeraArquivoCompetencia.GetValue() != u'Todos':\n\n contratos = Contrato.query.filter_by(competencia=self.cbGeraArquivoCompetencia.GetValue()).all()\n else:\n\n contratos = Contrato.query.all()\n\n self.contratosGeraArquivoListCtrl.DeleteAllItems()\n\n if not contratos:\n self.message = wx.MessageDialog(None, u'Não existe contratos para esta competência!', 'Info', wx.OK)\n self.message.ShowModal()\n\n else:\n\n if len(contratos) == self.contratosParaArquivoListCtrl.GetItemCount():\n pass\n else:\n\n for contrato in contratos:\n igual = False\n if self.contratosParaArquivoListCtrl.GetItemCount() == 0:\n index = self.contratosGeraArquivoListCtrl.InsertStringItem(sys.maxint, unicode(contrato.numeroContrato))\n self.contratosGeraArquivoListCtrl.SetStringItem(index, 1, unicode(contrato.nomeContratado))\n self.contratosGeraArquivoListCtrl.SetStringItem(index, 2, unicode(contrato.id))\n igual = True\n\n else:\n\n for x in range(self.contratosParaArquivoListCtrl.GetItemCount()):\n\n if contrato.numeroContrato == unicode(self.contratosParaArquivoListCtrl.GetItem(x, 0).GetText()):\n igual = True\n\n if not igual:\n index = self.contratosGeraArquivoListCtrl.InsertStringItem(sys.maxint, unicode(contrato.numeroContrato))\n self.contratosGeraArquivoListCtrl.SetStringItem(index, 1, unicode(contrato.nomeContratado))\n self.contratosGeraArquivoListCtrl.SetStringItem(index, 2, unicode(contrato.id))\n\n self.competenciaAtual = unicode(self.cbGeraArquivoCompetencia.GetValue())\n","sub_path":"WindowContrato.py","file_name":"WindowContrato.py","file_ext":"py","file_size_in_byte":84887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"371330873","text":"# How many principal components are needed to reach 0.95 cumulative variance with PCA?\n# Build an xgboost model using the n_components (>= 0.95) found in m31 \n# Compare with CNN\nimport numpy as np\nfrom tensorflow.keras.datasets import mnist\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import Pipeline\nfrom xgboost import XGBClassifier\nfrom sklearn.metrics import accuracy_score\n\n# Data\n(x_train, y_train), (x_test, y_test) = 
mnist.load_data()\n\ndata = np.append(x_train, x_test, axis=0)\ndata = data.reshape(70000, 28*28) # PCA cannot take 3-D input, so reshape the images to 2-D.\nprint(data.shape) # (70000, 784)\ndata = data/255.\n\ntarget = np.append(y_train, y_test, axis=0)\nprint(target.shape) # (70000,)\n\n# pca = PCA() \n# pca.fit(data)\n# cumsum = np.cumsum(pca.explained_variance_ratio_)\n# print(\"cumsum : \", cumsum)\n\n# d = np.argmax(cumsum >= 0.95)+1\n# print(\"cumsum >= 0.95\", cumsum > 0.95)\n# print(\"d : \", d) # d : 154\n\n# import matplotlib.pyplot as plt\n# plt.plot(cumsum)\n# plt.grid()\n# plt.show()\n\npca = PCA(n_components=154)\ndata2 = pca.fit_transform(data)\n\nprint(data2.shape) # (70000, 154)\n\nx_train, x_test, y_train, y_test = train_test_split(data2, target, test_size=0.3, shuffle=True, random_state=47)\nprint(x_train.shape) # (56000, 154)\nprint(x_test.shape) # (14000, 154)\n\n# from tensorflow.keras.utils import to_categorical\n# y_train = to_categorical(y_train)\n# y_test = to_categorical(y_test)\n# print(y_train.shape) # (56000, 10)\n# print(y_test.shape) # (14000, 10)\n\n# Modeling\nmodel = XGBClassifier(n_jobs = 8, use_label_encoder=False)\n\n# Fitting\nmodel.fit(x_train, y_train, eval_metric='logloss')\n\n# Prediction\ny_pred = model.predict(x_test) \n\n# Evaluate\nresult = model.score(x_test, y_test)\nprint(\"result : \", result)\n\nscore = accuracy_score(y_test, y_pred)\nprint(\"accuracy_score : \", score)\n\n\n# CNN\n# loss : 0.034563612192869186\n# acc : 0.9889000058174133\n# y_test[:10] : [7 2 1 0 4 1 4 9 5 9]\n# y_pred[:10] : [7 2 1 0 4 1 4 9 5 9]\n\n# DNN\n# loss : 0.10550455003976822\n# acc : 0.9828000068664551\n# y_test[:10] : [7 2 1 0 4 1 4 9 5 9]\n# y_pred[:10] : [7 2 1 0 4 1 4 9 5 9]\n\n# PCA(>0.95) - DNN\n# loss : 0.09774444252252579\n# acc : 0.9767143130302429\n# y_test[:10] : [3 1 8 1 6 3 5 4 8 3]\n# y_pred[:10] : [3 1 8 1 6 3 5 4 8 3]\n\n# PCA(>1.0) - DNN\n# loss : 0.14994649589061737\n# acc : 0.9728571176528931\n# y_test[:10] : [3 1 8 1 6 3 5 4 8 3]\n# y_pred[:10] : [3 1 6 1 6 3 5 4 8 3]\n\n# PCA(>0.95) - XGBoost\n# result : 0.9644285714285714\n# acc score : 0.9644285714285714","sub_path":"ML,DL, RL/Machine Learning/ml/m33_pca_mnist1_xgb.py","file_name":"m33_pca_mnist1_xgb.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"226968296","text":"import json\nimport math\nimport sys\nfrom matplotlib import pyplot as plt\n\n\ndef pcc(x, y):\n # Pearson correlation coefficient\n n = len(x)\n v1 = n * sum([x[i]*y[i] for i in range(n)]) - sum(x)*sum(y)\n v2 = math.sqrt(n*sum([x[i]**2 for i in range(n)]) - sum(x)**2)\n v3 = math.sqrt(n*sum([y[i]**2 for i in range(n)]) - sum(y)**2)\n return v1 / (v2*v3)\n\n\ndef num_sim(x, y):\n if x == 0 and y == 0:\n return 1\n else:\n return 1 - (abs(x-y))/(abs(x)+abs(y))\n\n\ndef mean_list_sim(x,y):\n n = len(x)\n return sum([num_sim(x[i], y[i]) for i in range(n)]) / n\n\n\ndef event_prop():\n event_prop = json.loads(open('proportion.json', 'r').read())\n\n parseHTML = []\n layout = []\n evaluateScript = []\n\n for p in event_prop['res']:\n parseHTML.append(p['ParseHTML'])\n layout.append(p['Layout'])\n evaluateScript.append(p['EvaluateScript'])\n\n parseHTML.sort()\n layout.sort()\n evaluateScript.sort()\n\n plt.plot(parseHTML, marker='.')\n plt.plot(layout, marker='o')\n plt.plot(evaluateScript, marker='x')\n\n plt.legend(['ParseHTML', 'Layout', 'EvaluateScript'])\n plt.ylabel('Duration Proportion')\n plt.title('Duration Proportion of Different Events')\n 
plt.show()\n\n\ndef running_duration():\n event_prop = json.loads(open('running_duration.json', 'r').read())\n\n renderer_main = []\n compositor = []\n tileworker = []\n\n for p in event_prop['res']:\n v = 0\n for k in p:\n if k == 'CrRendererMain':\n p[k] = max(0.00001, p[k])\n renderer_main.append(p[k])\n elif k == 'Compositor':\n p[k] = max(0.00001, p[k])\n compositor.append(p[k])\n elif k.startswith('CompositorTileWorker'):\n v += p[k]\n\n v = max(0.00001, v)\n tileworker.append(v)\n\n renderer_main.sort()\n compositor.sort()\n tileworker.sort()\n\n plt.plot(renderer_main, marker='o')\n plt.plot(compositor, marker='.')\n plt.plot(tileworker, marker='x')\n\n plt.yscale('log')\n plt.ylim([0.00001, 0.5])\n plt.legend(['Renderer', 'Compositor', 'TileWorker'])\n plt.ylabel('Running Duration')\n plt.title('Running Duration Proportion of Different Threads')\n plt.show()\n\n\ndef pipeline_task_proportion(file):\n data = json.loads(open(file, 'r').read())\n legends = [\n 'UpdateLayoutTree',\n 'Layout',\n 'UpdateLayerTree',\n 'Paint',\n 'UpdateLayer',\n 'CompositeLayers',\n 'FireAnimationFrame',\n 'FunctionCall',\n 'EventDispatch',\n 'HitTest'\n ]\n colors = ['r', 'g', 'b', 'c', 'm', 'y',\n 'k', 'lightcoral', 'darkcyan', 'violet']\n # l = len(data['ptd'])\n # ylimit = 0\n stats = []\n\n # print(l, 'tasks are processed')\n # print('Task info: max, avg, median, min (in ms)')\n for i in range(10):\n v = []\n for arr in data['ptd']:\n if (arr[i] > 0):\n v.append(arr[i]/1000)\n # ylimit = max(ylimit, max(v))\n # plt.bar(range(l*i, l*(i+1)), v, color=colors[i])\n v.sort()\n l = len(v)\n if l > 0:\n stats.append([max(v), sum(v)/l, v[int(l/2)], min(v)])\n else:\n stats.append([0, 0, 0, 0])\n\n # plt.legend(legends)\n # plt.xlim([0, l*10+1])\n # plt.yscale('log')\n # plt.ylim([0, ylimit*1.01])\n # plt.show()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n l_ = 4\n for i in range(10):\n plt.bar(range(l_*i, l_*(i+1)), stats[i], color=colors[i])\n for j in range(l_):\n ax.annotate('%.2f' % stats[i][j], xy=(l_*i+j, stats[i][j]))\n ax.annotate('Pipeline count: %d' %\n len(data), xy=(20, ax.get_ylim()[1] * 0.8))\n ax.annotate('Values are max, avg, median, and min (in ms).',\n xy=(20, ax.get_ylim()[1] * 0.8 - 20))\n plt.legend(legends)\n plt.show()\n\n\ndef pipeline_pcc(file1, file2):\n d1 = json.loads(open(file1, 'r').read())['ptd']\n d2 = json.loads(open(file2, 'r').read())['ptd']\n res = []\n for i in range(10):\n x = [max(d1[i]), sum(d1[i])/len(d1[i]), d1[i]\n [int(len(d1[i])/2)], min(d1[i])]\n y = [max(d2[i]), sum(d2[i])/len(d2[i]), d2[i]\n [int(len(d2[i])/2)], min(d2[i])]\n v = pcc(x, y)\n res.append(v)\n print(res)\n\n\ndef pipeline_sim(file1, file2):\n d1 = json.loads(open(file1, 'r').read())['ptd']\n d2 = json.loads(open(file2, 'r').read())['ptd']\n mean_sim = []\n for i in range(10):\n x = [max(d1[i]), sum(d1[i])/len(d1[i]), d1[i]\n [int(len(d1[i])/2)], min(d1[i])]\n y = [max(d2[i]), sum(d2[i])/len(d2[i]), d2[i]\n [int(len(d2[i])/2)], min(d2[i])]\n v = mean_list_sim(x, y)\n mean_sim.append(v)\n # mean_sim.sort()\n return mean_sim\n\n\ndef test():\n data = json.loads(open('rd.json', 'r').read())['res']\n tags = ['inTaskDelay', 'networkDelay', 'normalDelay']\n for v in data:\n plt.clf()\n total = [0 for i in range(len(v['rd']))]\n for k in tags:\n line = []\n idx = 0\n for item in v['rd']:\n line.append(item[k])\n total[idx] += item[k]\n idx += 1\n line.sort()\n plt.plot(line)\n # total.sort()\n # plt.plot(total)\n plt.legend(tags)\n plt.title(v['domain'][:-5])\n plt.savefig('img/' + 
v['domain'][:-5] + '.png')\n\ndef ptd_sim():\n data = json.loads(open('ptd-sim.json', 'r').read())['res']\n \n data = [v[:6] for v in data]\n\n max_sim = [max(v) for v in data]\n max_sim.sort()\n min_sim = [min(v) for v in data]\n min_sim.sort()\n avg_sim = [sum(v) / 10 for v in data]\n avg_sim.sort()\n \n plt.plot(max_sim)\n plt.plot(min_sim)\n plt.plot(avg_sim)\n plt.legend(['Max Sim', 'Min Sim', 'Avg Sim'])\n plt.show()\n\n\ndef bar_chart():\n tasks = ['ParseHTML', 'ParseCSS', 'UpdateLayoutTree', 'Layout', 'UpdateLayerTree', 'UpdateLayer', 'Paint', 'CompositeLayers']\n v0 = [1348.9,36.7,12876.4,21899.6,421.3,38.6,610,237.9]\n v1 = [1332.3,93.4,13264.9,24337.5,452.1,37.3,752.3,237]\n v2 = [1395.4,37.9,12656.2,23543.2,421.5,35.9,743.9,241.6]\n # v3 = [1432.6,79.1,12014.7,20687.6,420.6,33.2,406.9,322.8]\n # v4 = [341.4,1882.2,758.5,684.5,262,1031.3]\n\n plt.scatter(range(8), v0)\n plt.scatter(range(8), v1, marker='.')\n plt.scatter(range(8), v2, marker='^')\n # plt.scatter(range(8), v3, marker='x')\n # plt.scatter(range(6), v4, marker='v')\n\n plt.xticks(range(len(tasks)), tasks, rotation=45)\n plt.legend(['Baseline', 'Nth-child', 'In Tag'])\n plt.title('CSS Selector Complexity: Render Task Duration ($\\mu$s)')\n\n plt.show()\n\nbar_chart()\n","sub_path":"render/src/v0.0/plotify.py","file_name":"plotify.py","file_ext":"py","file_size_in_byte":6732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"513665119","text":"import config.package\n\nclass Configure(config.package.CMakePackage):\n def __init__(self, framework):\n config.package.CMakePackage.__init__(self, framework)\n self.gitcommit = '60be4cf1536c2a97bbf8f32903c5dae0d47a2b04'\n self.giturls = ['https://github.com/elemental/Elemental']\n self.download = ['http://libelemental.org/pub/releases/Elemental-0.84-p5.tgz']\n self.liblist = [['libelemental.a','libpmrrr.a']]\n self.includes = ['elemental.hpp']\n self.cxx = 1\n self.requirescxx11 = 1\n self.downloadonWindows= 0\n return\n\n def setupDependencies(self, framework):\n config.package.CMakePackage.setupDependencies(self, framework)\n self.compilerFlags = framework.require('config.compilerFlags', self)\n self.blasLapack = framework.require('config.packages.BlasLapack',self)\n self.mpi = framework.require('config.packages.MPI',self)\n self.deps = [self.mpi,self.blasLapack]\n #\n # also requires the ./configure option --with-cxx-dialect=C++11\n return\n\n def formCMakeConfigureArgs(self):\n args = config.package.CMakePackage.formCMakeConfigureArgs(self)\n args.append('-DMATH_LIBS:STRING=\"'+self.libraries.toString(self.blasLapack.dlib)+'\"')\n args.append('-DUSE_QT5=OFF') # otherwise we would need Qt5 include paths to compile\n\n self.framework.pushLanguage('C')\n args.append('-DMPI_C_COMPILER=\"'+self.framework.getCompiler()+'\"')\n if self.framework.argDB['with-64-bit-indices']:\n args.append('-DUSE_64BIT_INTS=ON')\n self.framework.popLanguage()\n\n self.framework.pushLanguage('Cxx')\n if config.setCompilers.Configure.isSolaris():\n raise RuntimeError('Sorry, Elemental does not compile with Oracle/Solaris/Sun compilers')\n args.append('-DMPI_CXX_COMPILER=\"'+self.framework.getCompiler()+'\"')\n self.framework.popLanguage()\n\n if hasattr(self.compilers, 'FC'):\n self.framework.pushLanguage('FC')\n args.append('-DMPI_Fortran_COMPILER=\"'+self.framework.getCompiler()+'\"')\n self.framework.popLanguage()\n return 
args\n\n\n\n\n","sub_path":"config/BuildSystem/config/packages/elemental.py","file_name":"elemental.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"652915450","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom django101.cities.models import Person\n\n\ndef index(req):\n context = {\n \"name\": \"Simo\",\n \"people\": Person.objects.all()\n\n }\n return render(req, 'index.html', context)\n\n\ndef list_phones(request):\n context = {\n 'phones': [\n {\n 'name': \"GalaxyS500\",\n 'quantity': 3\n },\n {\n 'name': 'Xiaoiu',\n 'quantity': 0\n },\n {\n 'name': 'iPhone18',\n 'quantity': 4\n }\n ]\n # 'phones': []\n }\n context['message'] = \"Phones List\"\n return render(request, 'phones.html', context)","sub_path":"python_web_basics/lesson_02_url`s_and_templates/django101/cities/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"200745162","text":"import bpy\nimport os\nimport time\n\nfrom bpy.app.handlers import persistent\n\n\ndef reload_images():\n #reload all images\n for i in bpy.data.images:\n i.reload()\n\n #update viewers\n wman=bpy.data.window_managers['WinMan']\n for win in wman.windows:\n scr = win.screen\n for area in win.screen.areas:\n area.tag_redraw()\n #redraw if viewport in rendered mode\n region = [region for region in area.regions if region.type == 'WINDOW']\n if area.type=='VIEW_3D':\n for space in area.spaces: # iterate through spaces in current VIEW_3D area\n #print(space.shading.type)\n if False and space.type == 'VIEW_3D' and space.shading.type in ['MATERIAL','RENDERED']: # check if space is a 3D view\n override = {'window':win,\n 'screen':scr,\n 'area' :area,\n 'region':region,\n 'scene' :bpy.context.scene,\n 'blend_data' :bpy.context.blend_data\n }\n bpy.ops.view3d.toggle_render(override)\n bpy.ops.view3d.toggle_render(override)\n elif area.type=='IMAGE_EDITOR':\n for space in area.spaces:\n if space.type == 'IMAGE_EDITOR':\n s=space\n override = {'window':win,\n 'screen':scr,\n 'area' :area,\n 'region':region,\n 'scene' :bpy.context.scene,\n 'blend_data' :bpy.context.blend_data,\n 'edit_image' :s.image\n }\n bpy.ops.image.reload(override)\n elif area.type=='NODE_EDITOR':\n for space in area.spaces:\n if space.type == 'NODE_EDITOR':\n if space.show_backdrop == True:\n space.show_backdrop = True\n\n \n \n return{\"FINISHED\"}\n\ndef get_modification_times():\n for i in bpy.data.images:\n try:\n path=os.path.abspath(bpy.path.abspath(i.filepath))\n i.modification_time=str(os.path.getmtime(path))\n except FileNotFoundError:\n i.modification_time=\"missing\"\n return{\"FINISHED\"}\n\n@persistent\ndef reload_startup(scene):\n for i in bpy.data.images:\n try:\n path=os.path.abspath(bpy.path.abspath(i.filepath))\n i.modification_time=str(os.path.getmtime(path))\n except FileNotFoundError:\n i.modification_time=\"missing\"\n print(\"Auto Reload Images --- All images modification time updated\")","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"528248401","text":"class Single(object):\n _instance = None\n def __new__(cls, *args, **kw):\n if not cls._instance:\n cls._instance = super(Single, cls).__new__(cls)\n return cls._instance \n def __init__(self, name, age):\n 
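# Note: __new__ always hands back the one shared instance, but __init__ still runs on every call, so these attributes are overwritten by each construction (see the prints below).\n 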
self.name = name\n self.age = age\n\na = Single('kkp', 18)\n# print(a.name)\nb = Single(\"haha\", 19)\n# print(b.name) # haha\n# print(a.name) # haha\n# print(id(a),id(b)) # 2615182656512 2615182656512\n\nclass A:\n def __init__(self):\n self.name = \"zhangp\"\n \n def method(self):\n print(\"method print\")\n \na = A()\n\nprint(getattr(a, 'name', \"no name\"))\nprint(getattr(a, 'age', \"no age\"))\n\nprint(getattr(a, 'method', 'no methos'))\n\nsetattr(a, 'name1', 'cat')\n\nprint(a.name1)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"danlimoshi.py","file_name":"danlimoshi.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"498106146","text":"import os\nfrom Caminhos import Caminhos\nfrom Arquivo import Arquivo\nfrom GoogleSheets import GoogleSheets\n\n\nclass Start:\n def __init__(self):\n self.arquivo = Arquivo()\n self.sheet = GoogleSheets()\n self.atualizarArquivos()\n\n def atualizarArquivos(self):\n apps = os.listdir(Caminhos.todosAppsAndroid)\n for app in apps:\n if not app.__eq__(\".idea\") and not app.__eq__(\".gradle\") and not app.__eq__(\".DS_Store\"):\n local = Caminhos.todosAppsAndroid + app\n versao, pacote = self.arquivo.getVersion(local)\n self.sheet.defVersao(pacote.replace(\" \", \"\").replace(\"\\n\", \"\"), versao)\n print(pacote)\n\n\nStart()\n","sub_path":"Teste/AreaTestes.py","file_name":"AreaTestes.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"402211346","text":"from django.core.management.base import BaseCommand\nfrom django_chrome_bookmarks.models import Folder, Url\nimport chrome_bookmarks\nimport json\nimport os\n\nclass Command(BaseCommand):\n help = 'import bookmarks from file'\n\n def add_arguments(self, parser):\n parser.add_argument(\n dest='path',\n default=chrome_bookmarks.path,\n nargs='?',\n help='bookmarks file path',\n )\n\n def save_folder(self,data,parent=None,key=None):\n folder = chrome_bookmarks.Item(data)\n parent = Folder(id=folder[\"id\"], key=key,name=folder[\"name\"], parent=parent,date_added = folder[\"date_added\"], date_modified = folder.get(\"date_modified\",0))\n parent.save()\n for url in folder.urls:\n Url(id=url[\"id\"],folder=parent,name=url[\"name\"], url=url[\"url\"], date_added = url[\"date_added\"], date_modified = url.get(\"date_modified\",0),last_visited_desktop=url[\"meta_info\"].get(\"last_visited_desktop\",0)).save()\n for f in folder.folders:\n self.save_folder(f,parent=parent)\n\n def handle(self, *args, **options):\n path = options.get('path')\n Url.objects.all().delete()\n Folder.objects.all().delete()\n for key, data in json.loads(open(path).read())[\"roots\"].items():\n self.save_folder(data,key=key)\n\n","sub_path":"django_chrome_bookmarks/management/commands/import-bookmarks.py","file_name":"import-bookmarks.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"272075700","text":"from threading import Event\nfrom threading import Thread as _Thread\nimport websocket\nimport json\nimport time\n\n\nclass WebQuikConnector:\n _handlers = {}\n\n def __init__(self, url, login, password):\n\n self._conn = url\n self._password = password\n self._login = login\n self._ws = websocket.WebSocketApp(self._conn,\n on_close=self._on_close,\n on_open=self._on_socket_open,\n on_message=self._on_message,\n on_error=self._on_error)\n self._t = 
_Thread(target=self._ws.run_forever)\n self._t.daemon = True\n self._thread_for_ping = _Thread(target=self.__quik_run_forever)\n self._thread_for_ping.daemon = True\n\n def __quik_run_forever(self):\n ticker = Event()\n while not ticker.wait(3):\n self._on_ping()\n\n #region socket standart funs\n def _on_error(self, error):\n self._ws = websocket.WebSocketApp(self._conn,\n on_close=self._on_close,\n on_open=self._on_socket_open,\n on_message=self._on_message,\n on_error=self._on_error)\n self._t = _Thread(target=self._ws.run_forever, kwargs={\"ping_interval\": 3, \"ping_timeout\": 2})\n self._t.start()\n\n print(\"startend\")\n print(type(error))\n print(error)\n\n def _on_ping(self):\n request = {\n \"msgid\": 10008,\n }\n self._ws.send(json.dumps(request))\n\n def _on_message(self, raw_msg):\n \"\"\"\n Entry for message processing. Call specific processors for different messages.\n \"\"\"\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)\n\n def _on_close(self):\n print('connection closed')\n\n def _on_socket_open(self):\n print(\"startup\")\n request = {\n \"msgid\": 10000,\n \"login\": self._login,\n \"password\": self._password,\n \"app_type\": \"WEB\",\n \"version\": \"7.2.0\",\n \"userAgent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0\",\n \"btc\": \"true\",\n \"height\": \"498\",\n \"width\": \"1920\"\n }\n self._ws.send(json.dumps(request))\n #endregion\n\n def start(self):\n self._t.start()\n time.sleep(10)\n self._thread_for_ping.start()\n\n def send_message(self, message):\n self._ws.send(json.dumps(message))\n\n def add_handler(self, handler):\n for msg_id in handler.message_ids:\n if self._handlers.get(msg_id):\n self._handlers[msg_id].append(handler)\n else:\n self._handlers[msg_id] = [handler]\n\n #region Common requests\n def ask_bottle(self, scode, depth=15):\n request = {\n \"msgid\": 11014,\n \"c\": \"QJSIM\",\n \"s\": scode,\n \"depth\": depth\n }\n self.send_message(request)\n\n def send_order(self, scode, price, quantity, is_sell=False):\n is_sell_num = 1 if is_sell else 0\n order = {\n \"msgid\": 12000,\n \"isMarket\": 0,\n \"isMarketSpread\": 0,\n \"spread\": \"0\",\n \"price\": f'{price}',\n \"takeProfit\": \"0\",\n \"offset\": \"0\",\n \"isStop\": \"0\",\n \"ccode\": \"QJSIM\",\n \"scode\": f'{scode}',\n \"account\": \"NL0011100043\",\n \"clientCode\": \"13222\",\n \"sell\": f'{is_sell_num}',\n \"quantity\": f'{quantity}'\n }\n self.send_message(order)\n\n def candles(self, scode, interval):\n request = {\n \"msgid\": 11016,\n \"c\": \"QJSIM\",\n \"s\": scode,\n \"p\": interval\n }\n self.send_message(request)\n #endregion","sub_path":"WebQuikConnector.py","file_name":"WebQuikConnector.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"469999640","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport protokoll as prot\nimport pandas as pd\nimport datetime\nimport hilfe_system as hs\nimport produktmanager as pm\n\nclass Fortschreibung:\n\n def __init__(self, f_dict):\n work_dir=f_dict.get('work_dir')\n file_protokoll=work_dir+'protokoll_system_fortschreibung.txt'\n self.oprot = prot.Protokoll(file_protokoll)\n\n self.file_system_bestand=f_dict.get('file_system_bestand')\n self.file_system_fortschreibung=f_dict.get('file_system_fortschreibung')\n 
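# Column-dtype maps intended for pandas.read_csv when loading the Bestand and Fortschreibung CSV files.\n 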
self.file_system_bestand_struktur_dict=f_dict.get('file_system_bestand_struktur')\n self.file_system_fortschreibung_struktur_dict=f_dict.get('file_system_fortschreibung_struktur')\n\n self.files_dict=f_dict\n \n self.hilfe = hs.Hilfe_System()\n \n def LeseWertAusBestandCSV(self, key, name):\n datei=self.file_system_bestand\n struktur = self.file_system_bestand_struktur_dict\n df=pd.read_csv(datei, sep=\";\", dtype=struktur)\n \n vsnr = key.get('vsnr')\n histnr = int(key.get('histnr'))\n von = int(key.get('von'))\n bis = int(key.get('bis'))\n \n df1 = df[(df.vsnr == vsnr) & (df.histnr == histnr) & (df.von == von) & (df.bis == bis) & (df.name == name)]\n \n if df1.empty:\n wert=0\n text='In der Bestandstabelle: ' +datei+ ' mit der vsnr: ' +vsnr+ ' wurden für den namen: '+name+ 'keine Daten gefunden'\n self.oprot.SchreibeInProtokoll(text)\n else:\n index=df1.index[0]\n wert=df1.at[index, 'wert']\n\n return wert \n\n def LeseWertAusFortschreibungCSV(self, key_dict, name):\n datei=self.file_system_fortschreibung\n df=pd.read_csv(datei, sep=\";\")\n \n vsnr = key_dict.get('vsnr')\n histnr = key_dict.get('histnr')\n von = key_dict.get('von')\n bis = key_dict.get('bis')\n \n df[['vsnr', 'histnr', 'von', 'bis', 'name', 'wert']] = df[['vsnr', 'histnr', 'von', 'bis', 'name', 'wert']].astype(str)\n \n df1 = df[(df.vsnr == vsnr) & (df.histnr == histnr) & (df.von == von) & (df.bis == bis) & (df.name == name)]\n \n if df1.empty:\n wert=0\n text = 'Fortschreibung: in der Fortschreibungstabelle: ' +datei+ ' mit der vsnr: ' +vsnr+ ' wurden für den namen: '+name+ 'keine Daten gefunden'\n self.oprot.SchreibeInProtokoll(text)\n else:\n index=df1.index[0]\n wert=df1['wert'][index]\n \n return wert \n\n def LeseVertragAusBestand(self, key):\n vsnr = str(key.get('vsnr'))\n histnr = str(key.get('histnr'))\n von = str(key.get('von'))\n bis = str(key.get('bis'))\n \n vertrag={}\n \n datei=self.file_system_bestand\n df=pd.read_csv(datei, sep=\";\")\n df[['vsnr', 'histnr', 'von', 'bis', 'name', 'wert']] = df[['vsnr', 'histnr', 'von', 'bis', 'name', 'wert']].astype(str)\n df1 = df[(df.vsnr == vsnr) & (df.histnr == histnr) & (df.von == von) & (df.bis == bis)]['name']\n for name in df1:\n wert=self.LeseWertAusBestandCSV(key,name)\n vertrag[name]=wert\n \n return vertrag\n \n def SchreibeDictInFortschreibung(self, von_dict, bis_dict, vertrag):\n key={}\n eintrag={}\n \n key.clear()\n key['vsnr']=vertrag.get('vsnr')\n key['histnr']=vertrag.get('histnr')\n key['von']=von_dict.get('jjjjmmtt')\n key['bis']=bis_dict.get('jjjjmmtt')\n \n for index in vertrag:\n eintrag['name']=str(index)\n eintrag['wert']=vertrag.get(str(index))\n self.SchreibeInFortschreibungCSV(key, eintrag)\n\n def SchreibeInFortschreibungCSV(self, key, eintrag):\n datei=self.file_system_fortschreibung\n \n vsnr = key.get('vsnr')\n histnr = key.get('histnr')\n von = key.get('von')\n bis = key.get('bis')\n\n name=eintrag.get('name')\n wert=eintrag.get('wert')\n \n text=str(vsnr) + \";\" + str(histnr) + \";\" + str(von) + \";\" + str(bis) + \";\" + str(name) + \";\" + str(wert) + \"\\n\"\n \n f=open(datei, \"a\")\n f.write(text) \n f.close() \n \n def BestimmeKeyInFortschreibung(self, key_dict):\n datei=self.file_system_fortschreibung\n df=pd.read_csv(datei, sep=\";\", dtype=object)\n \n vsnr=key_dict.get('vsnr')\n histnr=key_dict.get('histnr')\n bis=key_dict.get('bis')\n von=''\n \n df1 = df[(df.vsnr == str(vsnr)) & (df.histnr == str(histnr)) & (df.bis == str(bis))] \n\n if df1.__len__() == 0:\n text='System: kein key in der Fortschreibung gefunden 
vsnr='+str(vsnr)+', histnr='+str(histnr)+', bis='+str(bis)\n            self.oprot.SchreibeInProtokoll(text)\n        else:\n            df2 = df1[df1.name=='von']['von']\n            if df2.__len__() == 0:\n                text='System: Eigentlich muesste es einen key.von in der Fortschreibung geben. vsnr='+str(vsnr)+', histnr='+str(histnr)+', bis='+str(bis)\n                self.oprot.SchreibeInProtokoll(text)\n            else:\n                if df2.__len__() == 1:\n                    # all good, there should be exactly one record\n                    index=df2.index[0]\n                    wert=df2[index]\n                    von=wert\n                else:\n                    text='System: Es wurden mehrere key.von in der Fortschreibung gefunden. Keine Eindeutige Zuordnung möglich!. vsnr='+str(vsnr)+', histnr='+str(histnr)+', bis='+str(bis)\n                    self.oprot.SchreibeInProtokoll(text)\n        \n        key_dict['von']=str(von) \n        \n        return key_dict\n    \n    def ListeOffenerVertraege(self, von_dict, bis_dict):\n        # Determine the list of all active contracts that are due to be rolled forward\n        \n        datei=self.file_system_bestand\n        struktur=self.file_system_bestand_struktur_dict\n        \n        bis=int(bis_dict.get('jjjjmmtt'))\n        von=int(von_dict.get('jjjjmmtt'))\n        \n        df=pd.read_csv(datei, sep=\";\", dtype=struktur)\n        df1=df[df.name == 'vsnr']['wert']\n        \n        self.listeOffenerVertraege=None\n        \n        alle_daten_dict={}\n        \n        for vsnr in df1: \n            vertrag_vorhanden=False \n            \n            alle_daten_dict.clear()\n            alle_daten_dict=df[(df.vsnr==vsnr)].groupby(['vsnr', 'histnr', 'von', 'bis']).groups\n\n            for satz in alle_daten_dict:\n                vsnr_histnr = satz[1]\n                vsnr_von = satz[2]\n                vsnr_bis = satz[3]\n                if (vsnr_bis >= bis) and (vsnr_von <= von):\n                    vertrag_vorhanden=True\n                    bis_vertrag=vsnr_bis\n                    von_vertrag=vsnr_von\n                    histnr_vertrag=vsnr_histnr\n            \n            if vertrag_vorhanden:\n                self.listeOffenerVertraege=hs.VerketteteListe(vsnr, histnr_vertrag, von_vertrag, bis_vertrag, self.listeOffenerVertraege)\n    \n    def FortschreibungVonBis(self, von_int, bis_int):\n        \n        von_dict=self.hilfe.DictAusDatum(str(von_int))\n        bis_dict=self.hilfe.DictAusDatum(str(bis_int))\n\n        self.ListeOffenerVertraege(von_dict, bis_dict)\n        liste = self.listeOffenerVertraege\n        if liste is None:\n            text='System: Es wurden keine Vertraege zur Fortschreibung gefunden: von='+str(von_int)+' bis='+str(bis_int)\n            print(text)\n            self.oprot.SchreibeInProtokoll(text)\n            return\n        \n        key={}\n        key_alt={}\n        vertrag_bestand={}\n        vertrag_fort_alt={}\n        vertrag_fort_neu={}\n        while liste is not None:\n            key.clear()\n            key['vsnr']=liste.vsnr\n            key['histnr']=liste.histnr\n            key['von']=liste.von\n            key['bis']=liste.bis\n            \n            vertrag_bestand.clear()\n            vertrag_bestand=self.LeseVertragAusBestand(key)\n            \n            vertrag_fort_neu.clear()\n            vertrag_fort_alt.clear()\n            key_alt.clear()\n            \n            bis_alt_str=self.RechneDatum(von_dict.get('jjjjmmtt'), -1)\n            \n            key_alt['vsnr']=liste.vsnr\n            key_alt['histnr']=liste.histnr\n            key_alt['bis']=bis_alt_str\n\n            key_alt=self.BestimmeKeyInFortschreibung(key_alt)\n            vertrag_fort_alt=self.LeseVertragAusFortschreibung(key_alt)\n            \n            vertrag_fort_neu=vertrag_bestand\n            vertrag_fort_neu=self.AnfangswerteFestlegen(vertrag_fort_alt, vertrag_fort_neu)\n            \n            self.SchreibeVertragFort(vertrag_fort_neu, von_dict, bis_dict)\n\n            liste=liste.nxt\n        else:\n            text='Fortschreibung/FortschreibungVonBis: Es wurden alle Vertraege fortgeschrieben: von='+str(von_int)+' bis='+str(bis_int)\n            print(text)\n            self.oprot.SchreibeInProtokoll(text) \n        \n        \n        \n    def AnfangswerteFestlegen(self, vertrag_alt, vertrag_neu):\n        \n        vertrag_neu['bil_derue1_anfang'] = vertrag_alt['bil_derue1_ende']\n        vertrag_neu['bil_derue2_anfang'] = vertrag_alt['bil_derue2_ende']\n        vertrag_neu['bil_derue3_anfang'] = vertrag_alt['bil_derue3_ende']\n    
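        # Note: each period's opening values ('..._anfang') are carried forward from the prior period's closing values ('..._ende'); no derue4 or derue6 buckets appear in this mapping.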
vertrag_neu['bil_derue5_anfang'] = vertrag_alt['bil_derue5_ende']\n        vertrag_neu['bil_derue7_anfang'] = vertrag_alt['bil_derue7_ende']\n        \n        vertrag_neu['nachreservierung_anfang'] = vertrag_alt['nachreservierung_ende']\n\n        return vertrag_neu\n    \n    def RechneDatum(self, datum_int, tage):\n        datum_dict=self.hilfe.DictAusDatum(str(datum_int))\n        tt_int=datum_dict.get('tt_int')\n        mm_int=datum_dict.get('mm_int')\n        jjjj_int=datum_dict.get('jjjj_int')\n        datum=datetime.datetime(jjjj_int,mm_int,tt_int)\n        \n        d = datum + datetime.timedelta(tage)\n        jjjj_neu=str(d.year)\n        mm_neu=str(d.month).zfill(2)\n        tt_neu=str(d.day).zfill(2)\n        s=str(jjjj_neu)+str(mm_neu)+str(tt_neu)\n        \n        return s \n    \n    def LeseVertragAusFortschreibung(self, key_dict):\n        vsnr = str(key_dict.get('vsnr'))\n        histnr = str(key_dict.get('histnr'))\n        von = str(key_dict.get('von'))\n        bis = str(key_dict.get('bis'))\n        \n        vertrag={}\n        \n        datei=self.file_system_fortschreibung\n        df=pd.read_csv(datei, sep=\";\")\n        df[['vsnr', 'histnr', 'von', 'bis', 'name', 'wert']] = df[['vsnr', 'histnr', 'von', 'bis', 'name', 'wert']].astype(str)\n        df1 = df[(df.vsnr == vsnr) & (df.histnr == histnr) & (df.von == von) & (df.bis == bis)]['name']\n        for name in df1:\n            wert=self.LeseWertAusFortschreibungCSV(key_dict,name)\n            vertrag[name]=wert\n        \n        return vertrag\n\n    def IstVertragInFortschreibung(self, vertrag):\n        datei=self.file_system_fortschreibung\n        \n        df=pd.read_csv(datei, sep=\";\", dtype=object)\n        \n        vsnr=vertrag.get('vsnr')\n        \n        df1 = df[(df.vsnr == str(vsnr))] \n\n        dic={}\n        if df1.__len__() == 0:\n            dic['fortgeschrieben']='nein'\n        else:\n            dic['fortgeschrieben']='ja'\n        \n        return dic\n    \n    def SchreibeVertragFort(self, vertrag, von_dict, bis_dict):\n        opm = pm.Produktmanager(self.files_dict, vertrag)\n        opm.FortschreibungVonBis(von_dict, bis_dict, vertrag)\n        self.SchreibeDictInFortschreibung(von_dict, bis_dict, vertrag)\n","sub_path":"fortschreibung.py","file_name":"fortschreibung.py","file_ext":"py","file_size_in_byte":11535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"313619792","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2019 LG Electronics, Inc.\n#\n# This software contains code licensed as described in LICENSE.\n#\n\nimport simulation.config as config\nimport lgsvl\nimport random\nimport time\n\ncf = config.Config()\nsim = cf.Simulator()\ncf.LoadOrResetScene(sim, \"BorregasAve\")\n\nspawns = sim.get_spawn()\n\nstate = lgsvl.AgentState()\nstate.transform = spawns[0]\na = sim.add_agent(\"Lincoln2017MKZ (Apollo 5.0)\", lgsvl.AgentType.EGO, state)\n\nprint(\"Current time:\", sim.time_of_day)\n\ninput(\"Press Enter to set fixed time to 19:00\")\n\n# Time of day can be set from 0 ... 24\nsim.set_time_of_day(19.0)\nprint(sim.time_of_day)\n\nsim.run(5)\n\ninput(\"Press Enter to set normal time to 10:30\")\n# Normal time moves forward (at an accelerated rate). Pass False to set_time_of_day for this to happen\nsim.set_time_of_day(10.5, False)\nprint(sim.time_of_day)\n\nsim.run(5)\n\nprint(sim.time_of_day)","sub_path":"19-time-of-day.py","file_name":"19-time-of-day.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"473348400","text":"import pandas as pd \nimport csv\nfrom players.models import Player\n\n\ndef turn_to_int(value): # Used to turn the Market Value field into an integer\n    s = value\n    s = s[1:]\n    last = s[-1]\n    if last == 'M':\n        s = s[:-1]\n        if '.' 
in s:\n s = s.replace('.','')\n s = s + '00000'\n else:\n s = s + '000000'\n if last == 'K':\n s = s[:-1]\n if '.' in s:\n s = s.replace('.','')\n s = s + '00'\n else:\n s = s + '000'\n return int(s)\n\n\n\ndef read_table(sometable, function): # To populate database with data from .csv file\n\tdf = pd.read_csv(sometable, sep=',', usecols = ['Name', 'Age', 'Photo', 'Nationality', 'Overall','Club', 'Value','Position'])\n\tfor index, row in df.iterrows():\n\t\tPlayer.objects.get_or_create(\n\t\t\tname=row['Name'], \n\t\t\tage=row['Age'],\n\t\t\tphoto=row['Photo'],\n\t\t\tnationality=row['Nationality'],\n\t\t\toverall=row['Overall'],\n\t\t\tclub=row['Club'],\n\t\t\tvalue=row['Value'],\n\t\t\tposition=row['Position'],\n\t\t\tvalue_int=function(row['Value']),\n\t\t\t)\n\treturn df","sub_path":"players/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"405479017","text":"# usgs.py\n\nimport math\nimport traceback\nimport logging\n\nlogger = logging.getLogger('usng')\n\ngzdletters = 'NPQRSTUVWX'\nmymap = 'ABCDEFGHJKLMNPQRSTUVABCDEFGHJKLMNPQRSTUV'\nmxmap = '_ABCDEFGHJKLMNPQRSTUVWABCDEFGHJKLMNPQRSTUVW'\ngzdmap = 'NPQRSTUVWX'\nextramap = 'abcdefgh'\n\ndef __init__(self):\n pass\n\ndef hash(x, y, digits=6):\n # this hashing method is all easting digits then all northing digits\n xs = '%9.9d'%int(x)\n ys = '%9.9d'%int(y)\n\n if digits == 10:\n v = xs[-5:-1] + ys[-5:-1]\n elif digits == 8:\n v = xs[-5:-2] + ys[-5:-2]\n elif digits == 6:\n v = xs[-5:-3] + ys[-5:-3]\n elif digits == 4:\n v = xs[-5:-4] + ys[-5:-4]\n elif digits == 2:\n v = xs[-5] + ys[-5]\n else:\n v = ''\n\n return v\n\ndef to_usng(x, y, zone=None, gridSize=1000, precision=8):\n #logger.debug('{x}, {y}, {g}, {z}'.format(x=x, y=y, g=gridSize, z=zone))\n\n try:\n if not zone == None:\n gzdSet = zone % 6\n if gzdSet == 1:\n xzo = 0\n yzo = 0\n elif gzdSet ==2:\n xzo = 8\n yzo = 5\n elif gzdSet ==3:\n xzo = 16\n yzo = 0\n elif gzdSet ==4:\n xzo = 0\n yzo = 5\n elif gzdSet ==5:\n xzo = 8\n yzo = 0\n elif gzdSet ==0:\n xzo = 16\n yzo = 5\n else:\n assert False, \"UTM Zone is not understood\"\n\n # decode the coordinates to the labelled 100,000 grid cell\n xz = mxmap[int(x / 100000) + xzo]\n yz = mymap[(int(y / 100000) + yzo) % 20]\n gzd = '%d%s'%(zone, gzdmap[int(y / 900000)])\n else:\n xz = ''\n xz = ''\n yz = ''\n gzd = ''\n\n # ignore precision and use the gridSize to choose the number of digits to hash toghether\n digits = int(10 - (2 * round(math.log(float(gridSize), 10))))\n return gzd + xz + yz + hash(x, y, digits)\n\n except:\n # compute the origin\n mxs = int(x / 100000) + xzo\n logger.warning('mxs {m}'.format(m=mxs))\n traceback.print_exc()\n gzd = '####'\n xz = ''\n yz = ''\n\n return gzd + xz + yz + hash(x, y, 4)\n\ndef utm_zone_ll(zon):\n assert int(zon) >= 1, \"UTM Zones start at 1\"\n assert int(zon) <= 60, \"UTM Zones end at 60\"\n\n # compute the utm slice in Lat/Lon\n zx = int(zon) - 1\n slc = 360.0 / 60.0\n lon = -180.0 + (slc * zx)\n lon2 = -180.0 + (slc * (zx + 1))\n\n wkt = \"\"\"POLYGON(({px1} {py1}, {px2} {py2}, {px3} {py3},\n {px4} {py4}, {px1} {py1}))\"\"\".format(px1=lon,py1=0.0,\n px2=lon,py2=80.0,\n px3=lon2,py3=80.0,\n px4=lon2,py4=0.0)\n\n logger.debug(\"zone: {z}, wkt: {wkt}\".format(wkt=wkt, z=zon))\n return wkt\n\ndef utm_gzd_zone_data(zon):\n assert int(zon) >= 1, \"UTM Zones start at 1\"\n assert int(zon) <= 60, \"UTM Zones end at 60\"\n\n data = []\n\n # GXD are 6 degrees wide 
(longitude) and 8 degrees high (latitude)\n for gzd_lat in range(0, 80, 8):\n # compute the utm slice in Lat/Lon\n zx = int(zon) - 1\n slc = 360.0 / 60.0\n lon = -180.0 + (slc * zx)\n lon2 = -180.0 + (slc * (zx + 1))\n\n # set the min/max latitudes and force them to floats\n minlat = gzd_lat + 0.0\n maxlat = gzd_lat + 8 + 0.0\n\n wkt = \"\"\"POLYGON(({px1} {py1}, {px2} {py2}, {px3} {py3},\n {px4} {py4}, {px1} {py1}))\"\"\".format(px1=lon, py1=minlat,\n px2=lon, py2=maxlat,\n px3=lon2, py3=maxlat,\n px4=lon2, py4=minlat)\n\n logger.debug(\"zone: {z}, wkt: {wkt}\".format(wkt=wkt, z=zon))\n data.append([zon, 'N', gzdletters[int(gzd_lat / 8)], wkt])\n\n return data\n","sub_path":"usng_grid_builder/usng.py","file_name":"usng.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"384586102","text":"from src.typify import TypeExtractor, isNumeric\nfrom src.domains import Domain, getTable\nfrom src import database\nimport string\n\n\ndef __extractOfType(typed:[[string, int]], toExtract:int) -> [string]:\n \"\"\"\n Extracts all tokens in the input query that match the type specified.\n @param typed: the typed query\n @param toExtract: the type to extract. Expected in 1-indexed. For example, Type I should be 1.\n @return a list of the string tokens that match the required type\n \"\"\"\n ofType = []\n for token in typed:\n if token[1] == toExtract:\n ofType.append(token[0])\n return ofType\n\n\ndef __matchColumns(typeValues:[string], typeCols:[int], table:database.Table) -> [[string, [int]]]:\n \"\"\"\n Uses database queries to find which columns/attributes each of the typed tokens apply to.\n @param typeValues: a list of token values in the original query that were found to be the correct type\n @param typeCols: a list of columns in the table that are applicable for the used type\n @param table: the table that should be searched with the specified columns\n @return a list of token lists, where the first index in each sublist is the string token, and the second\n index is the column(s) found for that string token. For example: [['foo', [0,1]], ['bar', [1]]]\n \"\"\"\n matchList = []\n for token in typeValues:\n matched = []\n for col in typeCols:\n where = table.dat[col][0] + ' LIKE \"%' + token + '%\"'\n #print(where)\n result = database.query(table, [col], where)\n if len(result) > 0: # If there was some match in this column\n matched.append(col)\n if len(matched) > 0:\n matchList.append([token, matched])\n return matchList\n\n\ndef __reduce(matchList: [[string, int]], table:database.Table) -> [[string, int]]:\n \"\"\"\n Attempts to reduce the match list (as returned from __matchColumns) by combining sequential tokens of the\n same type. Calls will be made to the database to ascertain whether the combined types exist before the\n tokens are merged. Potential merging possibilities include sequential (fx 'Harley' 'Davidson' -> 'Harley Davidson')\n and reverse (fx 'Accord' 'Honda' -> 'Honda Accord')\n @param matchList: the list of tokens and attributes to try to merge\n @param table: the table to which these tokens belong\n @return none. The matchList parameter is modified.\n \"\"\"\n \n # When we get the matchList, it is a list of tokens and the column(s) that it applies to.\n # For example: [['honda', [0]], ['accord', [0,1]]\n # The order is important, since tokens are only logically connected to tokens in sequential order.\n # (we would not want to try to connect tokens separated by another token). 
This structure given\n # works well for simple uses. However, when we want to reduce the terms, it is possible for two\n # tokens to require the same position. For example, if honda and accord are combined, then there\n # must be ['honda accord', [0]] and ['accord', [1]] at the *same position* if either or both need\n # to combine with a subsequent token.\n # Therefore, we simplify the given structure to only have one possible row and create a list for\n # each possible position\n \n # ls is where we will save all the positional tokens. It must be the length of the original match list\n ls = []\n for _ in range(len(matchList)):\n ls.append([])\n # Now we break each token into all its rows and put it in its place\n for i in range(len(matchList)):\n matched = matchList[i]\n for col in matched[1]:\n ls[i].append([matched[0], col])\n \n # With our new structure, we want to go through each index (starting at 1) to the end and try to match\n # with the index immediately before if the columns match\n i = 1\n while i < len(matchList):\n c = 0\n while c < len(ls[i]):\n curr = ls[i][c]\n a = 0\n while a < len(ls[i-1]):\n last = ls[i-1][a]\n # Verify that the cols of curr and last match\n if curr[1] != last[1]:\n a += 1\n continue\n col = curr[1]\n \n # We will try to combine sequentially, then backwards\n ariadne = False\n tryNames = [(matchList[i-1][0] + ' ' + matchList[i][0]),\n (matchList[i][0] + ' ' + matchList[i-1][0])]\n for tryName in tryNames:\n # The form 'column LIKE \"%token%\"' will match any entry where the column contains the substring \"token\". \n where = table.dat[col][0] + ' LIKE \"%' + tryName + '%\"'\n if len(database.query(table, [col], where)):\n # There was a match, therefore we need to remove both curr and last from their respective lists\n ls[i].pop(c)\n ls[i-1].pop(a)\n # It is replaced by the new joint entry\n ls[i].insert(0, [tryName, col]) # insert at the beginning so we don't redo it\n # We only use the first combination that succeeds for this pair\n # redo the index for a since we deleted what was there\n # get the next index c\n ariadne = True\n break\n if ariadne:\n break\n a += 1\n c += 1\n i += 1\n \n # After reduction is done, the order does not matter, so we flatten ls and return the result\n ret = []\n for pos in ls:\n for token in pos:\n ret.append(token)\n return ret\n\n\ndef type1Where(typed:[[string, int]], table:database.Table) -> [string]:\n # We are going to want to pull out all the type Is from the typed query\n typeI = __extractOfType(typed, 1)\n \n # Now we want to see which type 1 these match (if there are multiple columns for this domain)\n typeICol = table.idxCol\n \n '''\n print(\"Type 1 tokens:\")\n print(typeI)\n print(\"Type 1 columns:\")\n print(typeICol)\n '''\n \n # First we want to know which of the Type I columns each token matches to. 
There could be several.\n    matchList = __matchColumns(typeI, typeICol, table)\n    #print(\"match list:\", matchList)\n    # And with that match list, we will try to reduce terms\n    matchList = __reduce(matchList, table)\n    \n    # At this point, we should have a finalized matchList to operate with\n    #print(matchList)\n    # We are going to separate each of the different constraints (so that some may be dropped if needed in partial matching)\n    ret = []\n    for matched in matchList:\n        ret.append(table.dat[matched[1]][0] + ' LIKE \"%' + matched[0] + '%\"')\n    return ret\n\n\ndef type2Where(typed:[[string, int]], table:database.Table, domain:Domain) -> [string]:\n    typeII = __extractOfType(typed, 2)\n    \n    # There is nowhere else that Type II attributes are saved for each table.\n    # Thus, the data is here: \n    cols = []\n    if domain == Domain.CAR:\n        cols = [1, 6, 8, 10, 11, 12, 13, 14, 15, 16]\n    elif domain == Domain.FURNITURE:\n        cols = [8, 9]\n    elif domain == Domain.HOUSING:\n        cols = [0, 1, 15, 18]\n    elif domain == Domain.JEWELRY:\n        cols = [4, 5] # maybe the title should be considered an indexing key...\n    elif domain == Domain.JOB:\n        cols = [3, 5, 6, 7, 9, 10, 11, 13]\n    elif domain == Domain.MOTORCYCLE:\n        cols = [3]\n    \n    matchList = __matchColumns(typeII, cols, table)\n    matchList = __reduce(matchList, table)\n    \n    ret = []\n    for matched in matchList:\n        ret.append(table.dat[matched[1]][0] + ' LIKE \"%' + matched[0] + '%\"')\n    return ret\n\n\ndef type3Where(typed:[[string, int]], table:database.Table) -> [string]:\n    # We will want to find the unit attached to each type 3. It can be either before or after\n    #TODO: ranges have implied units. For example \"300 - 500 miles\" -> \"300 miles\" - \"500 miles\"\n    black = -1 # if we use a unit after the number, the unit cannot be reused for before the next number\n    for i in range(len(typed)):\n        token = typed[i]\n        if token[1]==3 and isNumeric(token[0]):\n            # We found a value! Now we need to find a corresponding unit.\n            # Try the previous token\n            unit = None\n            if i > 0 and i-1 != black and typed[i-1][1] == 3 and not isNumeric(typed[i-1][0]):\n                # We assume this is the unit. It is type 3, which is either a unit or a number.\n                # It is not a number. Therefore, we assume it is the unit.\n                unit = typed[i-1][0]\n            elif i+1 < len(typed) and typed[i+1][1] == 3 and not isNumeric(typed[i+1][0]):\n                # Since we are using a unit after the number, we must set this unit to the blacklist.\n                # That way it cannot be used again by later numbers (using it as previous)\n                black = i+1\n                unit = typed[i+1][0]\n            \n            if unit is not None:\n                cols = []\n                # Now that we have a unit, we are going to try to use it. Hopefully it actually exists in the table\n                for attr in table.dat:\n                    if len(attr) == 3: # if it has length three, then it is of the form: name, type, [units]\n                        # Therefore, we try to match the found unit to the unit here\n                        units = attr[2]\n                        for tUnit in units:\n                            if tUnit == unit:\n                                # We don't have to match all the unit variations, only one\n                                cols.append(attr[0])\n                if len(cols) > 0:\n                    # we found maybe several matches. 
They should be OR-ed together to the final result\n                    pass\n    \n    return [] #TODO: this is just a filler until we can get it done\n    \n\nif __name__ == '__main__':\n    # We should get a query from the user here\n    # (Here is a sample query that we hardcode in for testing.)\n    ''' Failed queries:\n    'house in Melbourne Australia with 5 bedrooms'\n    'senior data engineer in utah'\n    'apartment in Provo'\n    'house in Australia with 2 bathrooms'\n    'toyota black car in excellent condition cheapest'\n    '''\n    query = 'honda accord red new'\n    #'Kawasaki Ninja 400 less than 200,000 miles and under $6,000'\n    #'golden necklace that is 16 carat'\n    \n    # Now we must categorize the query to know which domain we are searching\n    import src.multinomial_classification.run_classifier as classify\n    classifier = classify.Classifier()\n    classified = classifier.classify([query])\n    if len(classified) == 0:\n        raise Exception(\"The query could not be classified!\")\n    classified = classified[0]\n    if classified == \"car\":\n        domain = Domain.CAR\n    elif classified == \"furniture\":\n        domain = Domain.FURNITURE\n    elif classified == \"housing\":\n        domain = Domain.HOUSING\n    elif classified == \"jewelry\":\n        domain = Domain.JEWELRY\n    elif classified == \"computer science jobs\":\n        domain = Domain.JOB\n    elif classified == \"motorcycles\":\n        domain = Domain.MOTORCYCLE\n    else:\n        raise Exception(\"The classification of the query did not match any of the expected domains! Got: \" + classified)\n    table = getTable(domain)\n    \n    # now we want to pull some data out (Type I, II, III)\n    extractor = TypeExtractor()\n    typed = extractor.typify(query, domain)\n    print(\"Typed query:\")\n    print(typed, '\\n')\n    \n    # Now we want to start building the query.\n    # It is going to be in the form of a SELECT statement, with an AND for each of the types that need to be matched\n    # For example, SELECT * FROM table WHERE typeI AND typeII AND typeIII\n    typeIWhere = type1Where(typed, table)\n    print(typeIWhere)\n    typeIIWhere = type2Where(typed, table, domain)\n    print(typeIIWhere)\n    \n    \n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"602806616","text":"with open(\"churras.txt\", \"r\") as arquivo:\n    conteudo = arquivo.read()\n    \n    separando = conteudo.split(\";\")\n    A = int(separando[1])\n    B = float(separando[2])\n    \n    separando[1] = A\n    separando[2] = B\n    \n    print(B)","sub_path":"backup/user_144/ch87_2019_06_04_01_02_29_584513.py","file_name":"ch87_2019_06_04_01_02_29_584513.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"205971481","text":"import os\nimport logging\nimport tempfile\nfrom aiohttp import web\nfrom . 
import bootstrap\nfrom time import time\n\nlog = logging.getLogger(__name__)\n\n\ndef build_health_endpoint(\n name, update_server_version, api_server_version, smoothie_version,\n system_version, with_migration):\n health_dict = {\n 'name': name,\n 'updateServerVersion': update_server_version,\n 'apiServerVersion': api_server_version,\n 'smoothieVersion': smoothie_version,\n 'systemVersion': system_version,\n 'capabilities': {\n 'bootstrap': '/server/update/bootstrap',\n 'balenaUpdate': '/server/update',\n 'restart': '/server/update/restart'}\n }\n if with_migration:\n health_dict['capabilities']['buildrootMigration']\\\n = '/server/update/migration/begin'\n\n async def health(request: web.Request) -> web.Response:\n return web.json_response(\n health_dict,\n headers={'Access-Control-Allow-Origin': '*'}\n )\n return health\n\n\nasync def bootstrap_update_server(\n request: web.Request, test_flag=False) -> web.Response:\n start_time = time()\n data = await request.post()\n wheel = data.get('whl')\n log.debug('Got whl: {}'.format(wheel))\n log.debug(' filename: {}'.format(wheel.filename if wheel else ''))\n res = {'status': 'in progress'}\n tmpd = None\n filename = None\n python = None\n venv_site_pkgs = None\n\n # Unpack wheel and install into a virtual environment\n if not wheel:\n log.debug('No wheel file provided')\n res = {\n 'status': 'failure',\n 'message': '\"whl\" parameter missing from request'}\n\n if res.get('status') != 'failure':\n tmpd = tempfile.mkdtemp()\n filename = os.path.join(tmpd, wheel.filename) # type: ignore\n log.info('Preparing to install: {}'.format(filename))\n content = wheel.file.read() # type: ignore\n\n with open(filename, 'wb') as wf:\n wf.write(content)\n\n log.debug('Bootstrapping update server {} [test mode: {}]'.format(\n filename, test_flag))\n\n res, python, venv_site_pkgs, venv\\\n = await bootstrap.install_sandboxed_update(filename, request.loop)\n log.debug('Install complete with status: {}'.format(res.get('status')))\n\n if python and res.get('status') != 'failure':\n if test_flag:\n log.debug('Test mode, not testing successive install')\n res = {'status': 'Successfully installed update'}\n else:\n test_port = 34001\n res = await bootstrap.test_update_server(\n python, test_port, filename, venv_site_pkgs, venv)\n\n if res.get('status') == 'failure':\n log.debug('Test failed, not installing update')\n status = 400\n elif not test_flag:\n log.debug('Test successful, installing update')\n install_res, returncode = await bootstrap.install_update(\n filename, request.loop)\n res.update(install_res)\n if returncode == 0:\n status = 200\n else:\n status = 400\n else:\n log.debug('Self-test successful on test server')\n status = 200\n\n bootstrap.clean(tmpd)\n\n request_time = time() - start_time\n log.info('Bootstrap request took {:.3f} seconds'.format(request_time))\n return web.json_response(res, status=status)\n","sub_path":"update-server/otupdate/balena/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"466937882","text":"#!/usr/bin/env python3\n\"\"\"\nscript de demonstration pour tycat.\n\"\"\"\n\nfrom random import random\nfrom itertools import combinations, product\nfrom geo.point import Point\nfrom geo.segment import Segment\nfrom geo.tycat import tycat\n\ndef main():\n \"\"\"\n tycat example\n \"\"\"\n points = [[Point([random(), random()]) for _ in range(5)] for _ in range(2)]\n segments = [[Segment(endpoints) 
for endpoints in combinations(p, r=2)] for p in points]\n print(\"tycat(points, segments)\")\n tycat(points, segments)\n print(\"tycat(zip(iter(points), iter(segments)))\")\n tycat(zip(iter(points), iter(segments)))\n print(\"tycat(*zip(iter(points), iter(segments)))\")\n tycat(*zip(iter(points), iter(segments)))\n intersections = filter(None, (c[0].intersection_with(c[1]) for c in product(*segments)))\n print(\"intersections entre rouge et vert\")\n tycat(segments[0], segments[1], intersections)\n\nmain()\n","sub_path":"demo_tycat.py","file_name":"demo_tycat.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"204879101","text":"import pandas as pd\n\nmovies_df = pd.read_csv('https://raw.githubusercontent.com/Sisha3342/'\n 'pydata-book/2nd-edition/datasets/movielens/movies.dat',\n sep='::', names=['ID', 'Name', 'Genre'], index_col='ID',\n engine='python')\nmovies_df['Year'] = movies_df.Name.str[-5:-1]\nmovies_df.Name = movies_df.Name.str.replace('\\(\\w+\\)', '', regex=True)\n\nusers_df = pd.read_csv('https://raw.githubusercontent.com/Sisha3342/'\n 'pydata-book/2nd-edition/datasets/movielens/users.dat',\n sep='::', names=['ID', 'Gender', 'Age', 'Occupation', 'Zip'],\n index_col='ID', engine='python')\n\noccupations = {0: 'other', 1: 'academic/educator', 2: 'artist',\n 3: 'clerical/admin', 4: 'college/grad student', 5: 'customer service',\n 6: 'doctor/health care', 7: 'executive/managerial', 8: 'farmer',\n 9: 'homemaker', 10: 'K-12 student', 11: 'lawyer',\n 12: 'programmer', 13: 'retired', 14: 'sales/marketing',\n 15: 'scientist', 16: 'self-employed', 17: 'technician/engineer',\n 18: 'tradesman/craftsman', 19: 'unemployed', 20: 'writer'}\n\nages = {1: 'Under 18', 18: '18-24', 25: '25-34', 35: '35-44', 45: '45-49',\n 50: '50-55', 56: '56+'}\n\nusers_df.eval('Age = Age.apply(@ages.get)', inplace=True)\nusers_df.eval('Occupation = Occupation.apply(@occupations.get)', inplace=True)\n\nratings_df = pd.read_csv('https://raw.githubusercontent.com/Sisha3342/'\n 'pydata-book/2nd-edition/datasets/movielens/ratings.dat',\n sep='::', names=['UserID', 'MovieID', 'Rating', 'Timestamp'],\n engine='python')\n","sub_path":"data_analysis/movies_analysis/movies_dataframes.py","file_name":"movies_dataframes.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"339126073","text":"import json\nimport logging\nimport tornado\nimport addict\nimport functools\nfrom .errcode import ErrorCode\nfrom json import JSONEncoder\nfrom tornado import gen\nfrom tornado.web import RequestHandler\nfrom tornado.web import HTTPError\nfrom datetime import datetime, date\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef authenticated(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if not self.current_user:\n self.fail(code=ErrorCode.unauthorized, message=\"session_required\")\n else:\n return method(self, *args, **kwargs)\n\n return wrapper\n\n\nclass ApiJsonEncoder(JSONEncoder):\n def default(self, obj):\n try:\n if isinstance(obj, datetime):\n return obj.strftime(\"%Y-%m-%d %H:%M:%S\")\n elif isinstance(obj, date):\n return obj.strftime(\"%Y-%m-%d\")\n\n iterable = iter(obj)\n except TypeError as e:\n logger.warn(e)\n else:\n return list(iterable)\n return JSONEncoder.default(self, obj)\n\n\nclass APIError(Exception):\n code = \"-10000\"\n\n\nclass BaseMixin(object):\n def dumpjson(self, response):\n return 
json.dumps(response, cls=ApiJsonEncoder)\n\n def _get_msg_by_language(self, language, message):\n return (\n self.LANGUAGE_MAP[language][message]\n if self.LANGUAGE_MAP.get(language)\n and self.LANGUAGE_MAP[language].get(message)\n else message\n )\n\n def output(self, response, **kwargs):\n self._chunk = self.dumpjson(response)\n if not kwargs.get(\"content_type\"):\n self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n self.write(self._chunk)\n self.finish()\n\n def success(self, message=\"ok\", **kwargs):\n response = {\"code\": ErrorCode.success, \"message\": message, \"data\": kwargs}\n response[\"enmsg\"] = self._get_msg_by_language('en-US', message)\n response[\"cnmsg\"] = self._get_msg_by_language('zh-CN', message)\n self.output(response)\n\n\nclass BaseAPIHandler(RequestHandler, BaseMixin):\n\n ERR_PREFIX = None\n\n def fail(self, code, message, status_code=200, **kwargs):\n logger.info(\n \"status_code: %s, message: %s, kwargs: %s\" % (status_code, message, kwargs)\n )\n if status_code != 200:\n self.set_status(status_code)\n self.set_header(\"system_error_format\", \"json\")\n\n errcode = code\n if getattr(self, \"ERR_PREFIX\", None):\n errcode = self.ERR_PREFIX + errcode\n\n response = {\"code\": errcode, \"message\": message, \"data\": kwargs}\n response[\"enmsg\"] = self._get_msg_by_language('en-US', message)\n response[\"cnmsg\"] = self._get_msg_by_language('zh-CN', message)\n self.output(response)\n\n def info(self, status_code=200, **kwargs):\n logger.info(\"status_code: %s, kwargs: %s\" % (status_code, kwargs))\n self.set_status(status_code)\n self.output(kwargs)\n\n def get_current_user(self):\n userid = self.request.headers.get(\"userid\")\n if not userid:\n return\n\n return userid\n\n @property\n def userid(self):\n return self.current_user\n\n\nclass APIHandler(BaseAPIHandler):\n def api(self, params):\n return {}\n\n def parse_params(self):\n return (\n addict.Dict(tornado.escape.json_decode(self.request.body))\n if self.request.body\n else addict.Dict({})\n )\n\n def validate_params(self, params):\n \"\"\"\n params validate success return True value jump to finish\n else return None\n \"\"\"\n return True, \"\"\n\n def post(self):\n params = self.parse_params()\n result, message = self.validate_params(params)\n if result is False:\n self.fail(code=ErrorCode.system_error, message=message)\n return\n data = self.api(params)\n if not self._finished:\n self.success(**data if data else {})\n\n\nclass SessionAPI(APIHandler):\n @authenticated\n def post(self, *args, **kwargs):\n if self._finished:\n return\n super(SessionAPI, self).post(*args, **kwargs)\n\n","sub_path":"dragonlib/web/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"389083050","text":"import requests\n\nclass Weather:\n\n def __init__(self):\n self.appid = '11c0d3dc6093f7442898ee49d2430d20'\n self.city_id = 0\n\n\n def main(self, s_city):\n\n res = requests.get(\"http://api.openweathermap.org/data/2.5/find\",\n params={'q': s_city, 'type': 'like', 'units': 'metric', 'APPID': self.appid})\n data = res.json()\n cities = [\"{} ({})\".format(d['name'], d['sys']['country'])\n for d in data['list']]\n self.city_id = data['list'][0]['id']\n\n\n res = requests.get(\"http://api.openweathermap.org/data/2.5/weather\",\n params={'id': self.city_id, 'units': 'metric', 'lang': 'ru', 'APPID': self.appid})\n data = res.json()\n conditions = data['weather'][0]['description']\n 
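        # The lookups here follow the OpenWeatherMap current-weather JSON, roughly (abbreviated illustration with made-up values, not the full payload):\n        #   {'weather': [{'description': 'clear sky'}], 'main': {'temp': 20.1, 'temp_min': 18.0, 'temp_max': 22.5}}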
temp = data['main']['temp']\n temp_min = data['main']['temp_min']\n temp_max = data['main']['temp_max']\n return conditions, temp, temp_min, temp_max\n\n","sub_path":"weather_vk_bot.py","file_name":"weather_vk_bot.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"154051018","text":"\"\"\"\n Library of EV3 robot functions that are useful in many different applications. For example things\n like arm_up, arm_down, driving around, or doing things with the Pixy camera.\n\n Add commands as needed to support the features you'd like to implement. For organizational\n purposes try to only write methods into this library that are NOT specific to one tasks, but\n rather methods that would be useful regardless of the activity. For example, don't make\n a connection to the remote control that sends the arm up if the ir remote control up button\n is pressed. That's a specific input --> output task. Maybe some other task would want to use\n the IR remote up button for something different. Instead just make a method called arm_up that\n could be called. That way it's a generic action that could be used in any task.\n\"\"\"\n\nimport ev3dev.ev3 as ev3\nimport math\nimport time\n\n\nclass Snatch3r(object):\n \"\"\"Commands for the Snatch3r robot that might be useful in many different programs.\"\"\"\n\n def __init__(self):\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.MAX_SPEED = 900\n self.color_sensor = ev3.ColorSensor()\n assert self.color_sensor\n self.ir_sensor = ev3.InfraredSensor()\n assert self.ir_sensor\n self.beacon_seeker = ev3.BeaconSeeker(channel=1)\n assert self.beacon_seeker\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.pixy\n self.come_back = False\n self.spot1 = True\n self.spot2 = True\n self.spot3 = True\n self.spot4 = True\n\n def drive_inches(self, distance, speed):\n\n assert self.left_motor.connected\n assert self.right_motor.connected\n\n position = distance * 90\n\n self.left_motor.run_to_rel_pos(position_sp=position, speed_sp=speed,\n stop_action='brake')\n self.right_motor.run_to_rel_pos(position_sp=position, speed_sp=speed,\n stop_action='brake')\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n self.right_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n def turn_degrees(self, degrees_to_turn, turn_speed_sp):\n\n assert self.left_motor.connected\n assert self.right_motor.connected\n\n self.left_motor.run_to_rel_pos(position_sp=degrees_to_turn * 470 / 90,\n speed_sp=turn_speed_sp,\n stop_action='brake')\n self.right_motor.run_to_rel_pos(\n position_sp=-degrees_to_turn * 470 / 90,\n speed_sp=turn_speed_sp,\n stop_action='brake')\n\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n self.right_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n def arm_calibration(self):\n assert self.arm_motor\n assert self.touch_sensor\n self.arm_motor.run_forever(speed_sp=self.MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action=\"brake\")\n\n ev3.Sound.beep().wait()\n arm_revolutions_for_full_range = 14.2\n self.arm_motor.run_to_rel_pos(\n position_sp=-arm_revolutions_for_full_range * 360,\n speed_sp=self.MAX_SPEED)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n ev3.Sound.beep().wait()\n\n self.arm_motor.position = 0\n\n def arm_up(self):\n assert self.arm_motor\n 
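        # Raise the arm until the touch sensor at the top of the mast is pressed, then brake.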
assert self.touch_sensor\n self.arm_motor.run_forever(speed_sp=self.MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action=\"brake\")\n ev3.Sound.beep()\n\n def arm_down(self):\n assert self.arm_motor\n self.arm_motor.run_to_abs_pos(\n position_sp=0, speed_sp=self.MAX_SPEED)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep()\n\n def loop_forever(self):\n self.running = True\n while self.running:\n time.sleep(0.1)\n\n def shutdown(self):\n self.running = False\n self.left_motor.stop(stop_action='brake')\n self.right_motor.stop(stop_action='brake')\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n ev3.Sound.speak(\"Goodbye\").wait()\n\n def drive_forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)\n\n def turn_left(self, left_speed):\n self.left_motor.run_forever(speed_sp=-left_speed)\n self.right_motor.run_forever(speed_sp=left_speed)\n\n def turn_right(self, right_speed):\n self.left_motor.run_forever(speed_sp=right_speed)\n self.right_motor.run_forever(speed_sp=-right_speed)\n\n def stop(self):\n self.left_motor.stop(stop_action='brake')\n self.right_motor.stop(stop_action='brake')\n\n def drive_backward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=-left_speed)\n self.right_motor.run_forever(speed_sp=-right_speed)\n\n def seek_beacon(self):\n\n forward_speed = 300\n turn_speed = 100\n\n while not self.touch_sensor.is_pressed:\n current_heading = self.beacon_seeker.heading\n current_distance = self.beacon_seeker.distance\n if current_distance == -128:\n # If the IR Remote is not found just sit idle for this program until it is moved.\n print(\"IR Remote not found. Distance is -128\")\n self.stop()\n else:\n if math.fabs(current_heading) < 2:\n # Close enough of a heading to move forward\n print(\"On the right heading. 
Distance: \", current_distance)\n # You add more!\n if current_distance == 0:\n self.stop()\n return True\n\n elif current_distance > 0:\n self.drive_forward(forward_speed, forward_speed)\n\n elif math.fabs(current_heading) < 10:\n print(\"Adjusting heading: \", current_heading)\n if current_heading < 0:\n self.turn_left(turn_speed)\n\n elif current_heading > 0:\n self.turn_right(turn_speed)\n\n elif math.fabs(current_heading) > 10:\n print(\"Heading is too far off to fix: \", current_heading)\n\n time.sleep(0.2)\n\n # The touch_sensor was pressed to abort the attempt if this code runs.\n\n self.stop()\n return False\n\n def follow_the_line(self):\n while True:\n print('follow line')\n if self.color_sensor.reflected_light_intensity >= 90:\n self.turn_right(100)\n\n elif self.color_sensor.reflected_light_intensity <= 10:\n self.drive_forward(300, 300)\n\n elif self.color_sensor.color == ev3.ColorSensor.COLOR_RED:\n self.stop()\n break\n\n time.sleep(0.01)\n\n def go_back(self):\n self.come_back = True\n\n def stop_at_color(self, color_to_seek):\n while True:\n self.drive_forward(900, 900)\n if self.color_sensor.color == color_to_seek:\n self.stop()\n break\n time.sleep(0.1)\n\n def go_in(self, color):\n self.arm_up()\n self.turn_degrees(180, 300)\n self.stop_at_color(color)\n self.turn_degrees(90, 300)\n self.drive_inches(5, 300)\n print(233)\n self.arm_down()\n self.drive_inches(-5, 300)\n self.turn_degrees(90, 300)\n self.turn_toward_beacon()\n self.stop_at_color(5)\n\n def go_out(self, color):\n self.turn_degrees(180, 300)\n self.stop_at_color(color)\n self.turn_degrees(90, 300)\n self.drive_inches(5, 300)\n print(233)\n self.arm_up()\n print(666)\n self.drive_inches(-5, 300)\n self.turn_degrees(90, 300)\n self.turn_toward_beacon()\n self.stop_at_color(5)\n self.arm_down()\n\n def turn_toward_beacon(self):\n\n turn_speed = 100\n\n while not self.touch_sensor.is_pressed:\n current_heading = self.beacon_seeker.heading\n current_distance = self.beacon_seeker.distance\n if current_distance == -128:\n print(\"IR Remote not found. Distance is -128\")\n self.stop()\n else:\n if math.fabs(current_heading) < 2:\n print(\"On the right heading. 
Distance: \",\n current_distance)\n\n\n elif math.fabs(current_heading) >= 2:\n print(\"Adjusting heading: \", current_heading)\n if current_heading < 0:\n self.turn_left(turn_speed)\n\n elif current_heading > 0:\n self.turn_right(turn_speed)\n\n time.sleep(0.2)\n\n # The touch_sensor was pressed to abort the attempt if this code runs.\n\n self.stop()\n return False\n","sub_path":"libs/robot_controller.py","file_name":"robot_controller.py","file_ext":"py","file_size_in_byte":9370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"459125950","text":"import random\r\nfrom tkinter import *\r\n\r\ndef response():\r\n response_phrase = random.choice(RESPONSES)\r\n circletext2.delete(0, END)\r\n circletext2.insert(0,str(response_phrase))\r\n\r\nwindow = Tk()\r\nwindow.title('Magic 8')\r\nwindow.geometry(\"300x100\")\r\nwindow.resizable(0,0)\r\n\r\nRESPONSES = [\"It is certain\", \"It is decidedly so\", \"Without a doubt\",\\\r\n \"You may rely on it\", \"As I see it ,Yes\", \"Most likely\",\\\r\n \"Outlook does not look good\", \"Signs point to yes\", \\\r\n \"Signs point to yes\", \"Reply hazy try again\", \"Ask again later\"\\\r\n \"Better not tell you now\", \"Cannot predict now\", \"Don't count on it\"\\\r\n ,\"My reply is no\", \"My sources say no\", \"Outlook not so good\"\\\r\n \"very doubtful\"]\r\n\r\nbox1 = Label(window, text=\"Q: \")\r\nbox2 = Label(window, text=\"Answer: \")\r\n\r\nbox1.grid(row = 1, column = 1, padx = 5, pady = 5)\r\nbox2.grid(row = 2, column = 1, padx = 5, pady = 5)\r\n\r\ncircleVar = StringVar()\r\ncircletext = Entry(window, textvariable=circleVar)\r\n\r\ncircleVar2 = StringVar()\r\ncircletext2 = Entry(window, textvariable=circleVar2)\r\n\r\ncircletext.grid(row = 1, column = 2)\r\ncircletext2.grid(row = 2, column = 2)\r\n\r\nresponse = Button( window, text = 'Response', command=response)\r\nexitbtn = Button( window, text = 'Exit', command=exit)\r\n\r\nresponse.grid(row = 4, column = 1, padx = 1, pady = 1)\r\nexitbtn.grid(row = 4, column = 2, padx = 1, pady = 1)\r\n\r\nwindow.mainloop()\r\n","sub_path":"Python_Old/201_Using Python 3/8_ball.py","file_name":"8_ball.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"160262983","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport torchvision.transforms as transforms\nfrom torch.utils.data.sampler import Sampler\nfrom torch.utils.data.dataset import Dataset\n\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.utils.net_utils import weights_normal_init, save_net, load_net, \\\n adjust_learning_rate, save_checkpoint, clip_gradient\nfrom model.utils.contrastive_loss import ContrastiveLoss\n\nfrom model.faster_rcnn.vgg16 import vgg16\nfrom model.faster_rcnn.resnet import resnet\n\nfrom model.utils.plt_loss import plt_loss\n\nfrom model.utils.parser_func import parse_args, set_dataset_args\n\n\nclass DualDataset(Dataset):\n def __init__(self, set1, set2):\n super(DualDataset, self).__init__()\n self.set1 = set1\n self.set2 = 
set2\n\n def __getitem__(self, item):\n return self.set1[item], self.set2[item]\n\n def __len__(self):\n return len(self.set1)\n\n\nclass sampler(Sampler):\n def __init__(self, train_size, batch_size):\n self.num_data = train_size\n self.num_per_batch = int(train_size / batch_size)\n self.batch_size = batch_size\n self.range = torch.arange(0,batch_size).view(1, batch_size).long()\n self.leftover_flag = False\n if train_size % batch_size:\n self.leftover = torch.arange(self.num_per_batch*batch_size, train_size).long()\n self.leftover_flag = True\n\n def __iter__(self):\n rand_num = torch.randperm(self.num_per_batch).view(-1,1) * self.batch_size\n # for contrastive loss, fix_num\n # rand_num = torch.arange(0,self.num_per_batch).view(-1,1) * self.batch_size\n self.rand_num = rand_num.expand(self.num_per_batch, self.batch_size) + self.range\n\n self.rand_num_view = self.rand_num.view(-1)\n\n if self.leftover_flag:\n self.rand_num_view = torch.cat((self.rand_num_view, self.leftover),0)\n\n return iter(self.rand_num_view)\n\n def __len__(self):\n return self.num_data\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n args = set_dataset_args(args)\n \n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n# print('Using config:')\n# pprint.pprint(cfg)\n np.random.seed(cfg.RNG_SEED)\n\n #torch.backends.cudnn.benchmark = True\n if torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n # train set\n # -- Note: Use validation set and disable the flipped to enable faster loading.\n cfg.TRAIN.USE_FLIPPED = True\n# cfg.TRAIN.USE_FLIPPED = False\n cfg.USE_GPU_NMS = args.cuda\n \n # source dataset\n imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)\n train_size = len(roidb)\n # target dataset\n imdb_t, roidb_t, ratio_list_t, ratio_index_t = combined_roidb(args.imdb_name_target)\n train_size_t = len(roidb_t)\n\n print('{:d} source roidb entries'.format(len(roidb)))\n print('{:d} target roidb entries'.format(len(roidb_t)))\n\n output_dir = args.save_dir + \"/\" + args.net + \"/\" + args.log_ckpt_name\n# output_dir = args.save_dir + \"/\" + args.net + \"/\" + args.dataset_t\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n# dataset_s = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \\\n# imdb.num_classes, training=True)\n\n# dataset_t = roibatchLoader(roidb_t, ratio_list_t, ratio_index_t, args.batch_size, \\\n# imdb.num_classes, training=True)\n\n\n dataset_s = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \\\n imdb.num_classes, training=True)\n\n dataset_t = roibatchLoader(roidb_t, ratio_list_t, ratio_index_t, args.batch_size, \\\n imdb.num_classes, training=True)\n \n dataloader = torch.utils.data.DataLoader(\n DualDataset(dataset_s, dataset_t),\n batch_size=args.batch_size,\n sampler=sampler(train_size, args.batch_size),\n num_workers=args.num_workers,\n pin_memory=True\n )\n\n # initilize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n\n # ship to cuda\n if args.cuda:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n\n # make variable\n im_data = Variable(im_data)\n im_info = Variable(im_info)\n num_boxes = Variable(num_boxes)\n gt_boxes = 
Variable(gt_boxes)\n\n if args.cuda:\n cfg.CUDA = True\n\n # initilize the network here.\n if args.net == 'vgg16':\n fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res101':\n fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res50':\n fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res152':\n fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)\n else:\n print(\"network is not defined\")\n pdb.set_trace()\n\n fasterRCNN.create_architecture()\n\n lr = cfg.TRAIN.LEARNING_RATE\n lr = args.lr\n #tr_momentum = cfg.TRAIN.MOMENTUM\n #tr_momentum = args.momentum\n\n params = []\n for key, value in dict(fasterRCNN.named_parameters()).items():\n if value.requires_grad:\n if 'bias' in key:\n params += [{'params':[value],'lr':lr*(cfg.TRAIN.DOUBLE_BIAS + 1), \\\n 'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]\n else:\n params += [{'params':[value],'lr':lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]\n\n if args.cuda:\n fasterRCNN.cuda()\n \n if args.optimizer == \"adam\":\n lr = lr * 0.1\n optimizer = torch.optim.Adam(params)\n\n elif args.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)\n conrastive_criterion = ContrastiveLoss()\n if args.resume:\n load_name = os.path.join(output_dir,\n 'contrastive_faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n# load_name = os.path.join(output_dir,\n# 'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n# print(\"loading checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name)\n# args.session = checkpoint['session']\n# args.start_epoch = checkpoint['epoch']\n fasterRCNN.load_state_dict(checkpoint['model'])\n# optimizer.load_state_dict(checkpoint['optimizer'])\n# lr = optimizer.param_groups[0]['lr']\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n print(\"loaded checkpoint %s\" % (load_name))\n\n if args.mGPUs:\n fasterRCNN = nn.DataParallel(fasterRCNN)\n\n iters_per_epoch = int(train_size / args.batch_size)\n\n if args.use_tfboard:\n from tensorboardX import SummaryWriter\n logger = SummaryWriter(\"logs\")\n \n loss1 = []\n loss2 = []\n loss3 = []\n \n for epoch in range(args.start_epoch, args.max_epochs + 1):\n # setting to train mode\n fasterRCNN.train()\n loss_temp = 0\n start = time.time()\n\n# if epoch % (args.lr_decay_step + 1) == 0:\n if (epoch-1) in args.lr_decay_step:\n adjust_learning_rate(optimizer, args.lr_decay_gamma)\n lr *= args.lr_decay_gamma\n \n img_loss = 0\n reg_loss = 0\n \n for step, (data_s, data_t) in enumerate(iter(dataloader)):\n\n # source domain\n with torch.no_grad():\n im_data.resize_(data_s[0].size()).copy_(data_s[0])\n im_info.resize_(data_s[1].size()).copy_(data_s[1])\n gt_boxes.resize_(data_s[2].size()).copy_(data_s[2])\n num_boxes.resize_(data_s[3].size()).copy_(data_s[3])\n if im_data.size(0) != args.batch_size:\n continue\n\n #use gt as rois\n# t1 = time.time()\n# with torch.no_grad():\n# num_box = num_boxes.min()\n# tmp_boxes = gt_boxes[:, :num_box, :4]\n# num_image = tmp_boxes.size(0)\n# img_index = []\n# for i in range(num_image): \n# tmp = np.ones((num_box,1))*i\n# img_index += [tmp]\n# img_index = torch.tensor(img_index).float().cuda()\n# rois = torch.cat([img_index, tmp_boxes],2)\n# t2 = time.time()\n# 
print('time: {}'.format(t2-t1))\n#            pdb.set_trace()\n            \n            #use gt as rois\n#            img_index = []\n#            tmp_boxes = []\n#            for i in range(gt_boxes.size(0)):\n#                # print('type(gt_boxes[i, :num_boxes[i]]):{}'.format(type(gt_boxes[i, :num_boxes[i]])))\n#                # pdb.set_trace()\n#                tmp_boxes += [gt_boxes[i, :num_boxes[i]]]\n#                tmp = np.ones((num_boxes[i],1))*i\n#                img_index += [tmp]\n#            img_index = torch.tensor(img_index).float().cuda()\n#            boxes = tmp_boxes[:, :, :4]\n#            rois = torch.cat([img_index, boxes],2)\n\n            #two_level\n            fasterRCNN.zero_grad()\n#            base_feat_s, pooled_feat_s = fasterRCNN(im_data, im_info, gt_boxes, num_boxes, mode='two_level', gt_rois=rois)\n            base_feat_s, pooled_feat_s = fasterRCNN(im_data, im_info, gt_boxes, num_boxes, mode='two_level')\n            # target domain\n            with torch.no_grad():\n                im_data.resize_(data_t[0].size()).copy_(data_t[0])\n                im_info.resize_(data_t[1].size()).copy_(data_t[1])\n                gt_boxes.resize_(data_t[2].size()).copy_(data_t[2])\n                num_boxes.resize_(data_t[3].size()).copy_(data_t[3])\n            \n#            base_feat_t, pooled_feat_t = fasterRCNN(im_data, im_info, gt_boxes, num_boxes, mode='two_level', gt_rois=rois)\n            base_feat_t, pooled_feat_t = fasterRCNN(im_data, im_info, gt_boxes, num_boxes, mode='two_level')\n\n#            contrastive_loss1 = conrastive_criterion(base_feat_s.view(args.batch_size, -1), \n#                                             base_feat_t.view(args.batch_size, -1), \n#                                             t=args.t)\n#            print('num_boxes: {}'.format(num_boxes))\n#            print('base_feat_s.shape: {}'.format(base_feat_s.shape))\n#            print('base_feat_t.shape: {}'.format(base_feat_t.shape))\n#            print('pooled_feat_s.shape: {}'.format(pooled_feat_s.shape))\n#            print('pooled_feat_t.shape: {}'.format(pooled_feat_t.shape))\n#            pdb.set_trace()\n#            t1 = time.time()\n            contrastive_loss1 = conrastive_criterion(base_feat_s.view(base_feat_s.size(0), -1), \n                                             base_feat_t.view(base_feat_t.size(0), -1), \n                                             t=args.t)\n            contrastive_loss2 = conrastive_criterion(pooled_feat_s, \n                                             pooled_feat_t, \n                                             t=args.t)\n            loss = contrastive_loss1 * (1-args.lambd) + contrastive_loss2 * args.lambd\n#            t2 = time.time()\n#            print('time: {}'.format(t2-t1))\n#            pdb.set_trace()\n            loss_temp += loss.item()\n\n            # backward\n            optimizer.zero_grad()\n            loss.backward()\n            if args.net == \"vgg16\":\n                clip_gradient(fasterRCNN, 10.)\n            optimizer.step()\n            \n            \n            img_loss += contrastive_loss1.mean().item()\n            reg_loss += contrastive_loss2.mean().item()\n            \n            if step % args.disp_interval == 0:\n                end = time.time()\n                if step > 0:\n                    loss_temp /= (args.disp_interval + 1)\n\n                if args.mGPUs:\n                    loss_contrastive1 = contrastive_loss1.mean().item()\n                    loss_contrastive2 = contrastive_loss2.mean().item()\n                else:\n                    loss_contrastive1 = contrastive_loss1.item()\n                    loss_contrastive2 = contrastive_loss2.item()\n                \n                \n                print(\"[session %d][epoch %2d][iter %4d/%4d] loss: %.4f, lr: %.2e\" \\\n                                                % (args.session, epoch, step, iters_per_epoch, loss_temp, lr))\n                print(\"\\t\\t\\ttime cost: %f\" % (end-start))\n                print(\"\\t\\t\\tloss_contrastive 1: %.4f, loss_contrastive 2: %.4f\" % (loss_contrastive1, loss_contrastive2))\n                if args.use_tfboard:\n                    info = {\n                        'loss': loss_temp,\n                        'loss_contrastive1': loss_contrastive1,\n                        'loss_contrastive2': loss_contrastive2\n                    }\n                    logger.add_scalars(\"logs_s_{}/losses\".format(args.session), info, (epoch - 1) * iters_per_epoch + step)\n\n                loss_temp = 0\n                start = time.time()\n        \n        loss1.append(img_loss/step)\n        loss2.append(reg_loss/step)\n        loss3.append((img_loss*(1-args.lambd)+reg_loss*args.lambd)/step)\n        plt_loss(epoch, 'output/{}'.format(args.dataset_t), 'Image Level', loss1)\n        plt_loss(epoch, 'output/{}'.format(args.dataset_t), 'Region Level', 
loss2)\n plt_loss(epoch, 'output/{}'.format(args.dataset_t), 'Total', loss3)\n \n save_name = os.path.join(output_dir, 'contrastive_faster_rcnn_{}_{}_{}.pth'.format(args.session, epoch, step))\n save_checkpoint({\n 'session': args.session,\n 'epoch': epoch + 1,\n 'model': fasterRCNN.module.state_dict() if args.mGPUs else fasterRCNN.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'pooling_mode': cfg.POOLING_MODE,\n 'class_agnostic': args.class_agnostic,\n }, save_name)\n print('save model: {}'.format(save_name))\n\n if args.use_tfboard:\n logger.close()\n","sub_path":"train_S2T_contrast.py","file_name":"train_S2T_contrast.py","file_ext":"py","file_size_in_byte":13657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"217882212","text":"from loris.info.abstract_extractor import AbstractExtractor\nfrom loris.info.abstract_extractor import BITONAL_QUALITIES\nfrom loris.info.abstract_extractor import COLOR_QUALITIES\nfrom loris.info.abstract_extractor import GRAY_QUALITIES\nfrom loris.info.info_data import InfoData\nfrom math import ceil\nfrom PIL import Image\n\nMODES_TO_QUALITIES = {\n '1': BITONAL_QUALITIES,\n 'L': GRAY_QUALITIES,\n 'LA': GRAY_QUALITIES,\n 'P': GRAY_QUALITIES,\n 'RGB': COLOR_QUALITIES,\n 'RGBA': COLOR_QUALITIES,\n 'CMYK': COLOR_QUALITIES,\n 'YCbCr': COLOR_QUALITIES,\n 'I': COLOR_QUALITIES,\n 'F': COLOR_QUALITIES\n}\n\nCOLOR_MODES = ('RGB', 'RGBA', 'CMYK', 'YCbCr', 'I', 'F')\n\nclass PillowExtractor(AbstractExtractor):\n # See comments in AbstractExtractor (in this module) for how this is\n # intended to work.\n\n def __init__(self, compliance, app_configs):\n super().__init__(compliance, app_configs)\n sf = app_configs['scale_factors']['other_formats']\n self.include_scale_factors = sf['enabled'] and self.compliance == 0\n if self.include_scale_factors:\n self.tile_w = sf['tile_width']\n self.tile_h = sf['tile_height']\n\n def extract(self, path, http_identifier):\n info_data = InfoData(self.compliance, http_identifier)\n pillow_image = Image.open(path)\n w, h = pillow_image.size\n info_data.width, info_data.height = (w, h)\n info_data.profile = self._make_profile(pillow_image)\n max_size = PillowExtractor.max_size(w, h, max_area=self.max_area, \\\n max_width=self.max_width, max_height=self.max_height)\n info_data.sizes = [ max_size ]\n if self.include_scale_factors:\n tiles, sizes = self.level_zero_tiles_and_sizes(max_size['width'], \\\n max_size['height'], self.tile_w, self.tile_h)\n info_data.tiles = tiles\n if info_data.width == max_size['width']:\n info_data.sizes.extend(sizes[1:])\n else:\n info_data.sizes.extend(sizes)\n return info_data\n\n @staticmethod\n def is_color(pillow_image):\n return pillow_image.mode in COLOR_MODES\n\n def level_zero_tiles_and_sizes(self, image_w, image_h, tile_w, tile_h):\n # These are designed to work w/ OSd, hence ceil().\n tiles = PillowExtractor._level_zero_tiles(image_w, image_h, tile_w, tile_h)\n # Always a chance that the default tile size is larger than the image:\n smallest_scale = 1\n if tiles is not None:\n smallest_scale = tiles[0]['scaleFactors'][-1]\n sizes = PillowExtractor._level_zero_sizes(smallest_scale, image_w, image_h)\n return (tiles, sizes)\n\n @classmethod\n def _level_zero_tiles(cls, image_w, image_h, tile_w, tile_h):\n long_image_dimenson = max(image_w, image_h)\n long_tile_dimenson = max(tile_w, tile_h)\n scales = [1]\n while (long_image_dimenson / scales[-1]) > long_tile_dimenson:\n nxt = scales[-1]*2\n if (long_image_dimenson / nxt) > 
long_tile_dimenson:\n scales.append(nxt)\n else:\n return cls._structure_tiles(tile_w, tile_h, scales)\n\n @classmethod\n def _level_zero_sizes(cls, smallest_scale_factor, image_w, image_h):\n sizes = [ ]\n scale = smallest_scale_factor\n w = ceil(image_w / scale)\n h = ceil(image_h / scale)\n while any([d != 1 for d in (w,h)]):\n sizes.append(cls._structure_size(w, h))\n scale = scale*2\n w = ceil(image_w / scale)\n h = ceil(image_h / scale)\n return sizes\n\n def _make_profile(self, pillow_image):\n include_color = PillowExtractor.is_color(pillow_image)\n profile = self.compliance.to_profile(include_color=include_color, \\\n max_area=self.max_area, max_width=self.max_width, \\\n max_height=self.max_height)\n return profile\n","sub_path":"loris/info/pillow_extractor.py","file_name":"pillow_extractor.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"367524770","text":"from keras.datasets import cifar10, mnist\nfrom keras.utils import Sequence, to_categorical\nimport numpy as np\nimport util\n\n\nclass Generator(Sequence):\n def __init__(self, n, batch_size, decoder, include_label, shuffle):\n self.n = n\n self.batch_size = batch_size\n self.decoder = decoder\n self.include_label = include_label\n self.shuffle = shuffle\n self.samples = []\n self.labels = []\n self.index_array = np.arange(n)\n\n def __len__(self):\n return (self.n + self.batch_size - 1) // self.batch_size\n\n def __getitem__(self, i):\n if i >= len(self):\n raise ValueError(f'Asked to retrieve element {i}, but the Sequence has length {len(self)}')\n indices = self.index_array[self.batch_size*i:self.batch_size*(i+1)]\n sample = self.samples[indices]\n label = self.labels[indices]\n\n if self.decoder:\n new_sample = [sample, label]\n label = [label, sample]\n sample = new_sample\n\n if self.include_label:\n return (sample, label)\n return sample\n\n def on_epoch_end(self):\n if self.shuffle:\n np.random.shuffle(self.index_array)\n\n\nclass CIFARGenerator(Generator):\n def __init__(self, partition, batch_size=10, decoder=False, include_label=True, shuffle=False):\n (x_train_all, y_train_all), (x_test, y_test) = cifar10.load_data()\n split_index = len(x_train_all) * 9 // 10\n if partition == 'train':\n x = x_train_all[:split_index] / 255\n y = y_train_all[:split_index]\n elif partition == 'val':\n x = x_train_all[split_index:] / 255\n y = y_train_all[split_index:]\n elif partition == 'pred':\n x = x_test[:20] / 255\n y = y_test[:20]\n elif partition == 'test':\n x = x_test / 255\n y = y_test\n else:\n raise ValueError(f'Partition {partition} not valid.')\n\n super().__init__(len(x), batch_size, decoder, include_label, shuffle)\n\n self.samples = x\n self.labels = to_categorical(y, num_classes=10)\n\n\nclass MNISTGenerator(Generator):\n def __init__(self, partition, batch_size=10, decoder=False, include_label=True, shuffle=False):\n (x_train_all, y_train_all), (x_test, y_test) = mnist.load_data()\n split_index = len(x_train_all) * 9 // 10\n if partition == 'train':\n x = x_train_all[:split_index] / 255\n y = y_train_all[:split_index]\n elif partition == 'val':\n x = x_train_all[split_index:] / 255\n y = y_train_all[split_index:]\n elif partition == 'pred':\n x = x_test[:20] / 255\n y = y_test[:20]\n elif partition == 'test':\n x = x_test / 255\n y = y_test\n else:\n raise ValueError(f'Partition {partition} not valid.')\n\n super().__init__(len(x), batch_size, decoder, include_label, shuffle)\n\n self.samples = np.pad(x, ((0,0), 
(2,2), (2,2)), 'constant')[...,np.newaxis] # pad with 0s to 32 x 32\n self.labels = to_categorical(y, num_classes=10)\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"213978784","text":"from pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SQLContext, SparkSession,Row\n#from pyspark.sql.functions import *\nfrom pyspark.sql import functions as f\nfrom pyspark.sql.types import *\n#from pyspark.storagelevel import StorageLevel\nfrom pyspark.sql.functions import col,max as max_,concat,concat_ws,year,when,month,to_date,lit,quarter,expr,sum,count,desc\nimport datetime, time\nimport datetime as dt\nimport re\nfrom pyspark import StorageLevel\nfrom pyspark.sql import functions as F\nimport pandas as pd\nimport os,sys,subprocess\nfrom os.path import dirname, join, abspath\n\nhelpersDir = '/home/padmin/KockpitStudio'\nsys.path.insert(0, helpersDir)\nfrom ConfigurationFiles.AppConfig import *\nfrom Helpers.Constants import *\nfrom Helpers import udf\n\ndef purchase_PurchaseCreditMemo(sqlCtx, spark):\n st = dt.datetime.now()\n logger = Logger()\n\n try:\n \n if datetime.date.today().month>int(MnSt)-1:\n UIStartYr=datetime.date.today().year-int(yr)+1\n else:\n UIStartYr=datetime.date.today().year-int(yr)\n\n pcmlEntity = next (table for table in config[\"TablesToIngest\"] if table[\"Table\"] == \"Purch_ Cr_ Memo Line\") \n pcmhEntity = next (table for table in config[\"TablesToIngest\"] if table[\"Table\"] == \"Purch_ Cr_ Memo Hdr_\")\n gpsEntity = next (table for table in config[\"TablesToIngest\"] if table[\"Table\"] == \"General Posting Setup\") \n pldEntity = next (table for table in config[\"TablesToIngest\"] if table[\"Table\"] == \"Posted Str Order Line Details\")\n veEntity = next (table for table in config[\"TablesToIngest\"] if table[\"Table\"] == \"Value Entry\")\n \n for entityObj in config[\"DbEntities\"]:\n logger = Logger()\n entityLocation = entityObj[\"Location\"]\n x = entityLocation.index(\"E\")\n DBName = entityLocation[:3]\n EntityName = entityLocation[-2:]\n hdfspath = STAGE1_PATH + \"/\" + entityLocation\n postgresUrl = PostgresDbInfo.url.format(entityLocation)\n\n pcml = udf.ToDFWitoutPrefix(sqlCtx, hdfspath, pcmlEntity, True)\n pcmh = udf.ToDFWitoutPrefix(sqlCtx, hdfspath, pcmhEntity, True)\n gps = udf.ToDFWitoutPrefix(sqlCtx, hdfspath, gpsEntity, True)\n pld = udf.ToDFWitoutPrefix(sqlCtx, hdfspath, pldEntity, True)\n VE = udf.ToDFWitoutPrefix(sqlCtx, hdfspath, veEntity, True)\n\n #pil = pil.na.fill({'Gen_Bus_PostingGroup':'NA','Gen_Prod_PostingGroup':'NA','DocumentNo_':'NA','LineNo_':'NA','PostingDate':'NA'})\n pcml = pcml.filter((year(col(\"PostingDate\"))!='1753')&(col(\"Quantity\")!=0)&(col(\"Type\")!=4))\n Line = pcml\\\n .withColumn(\"LinkItem\",when(pcml[\"Type\"]==2,when(pcml[\"No_\"]=='',lit(\"NA\")).otherwise(pcml[\"No_\"])))\\\n .withColumn(\"No\",when(col(\"Type\")==1,col(\"No_\").cast('int')))\\\n .withColumn(\"GL_Link\",concat_ws(\"|\",lit(entityLocation),pcml.Gen_Bus_PostingGroup,pcml.Gen_Prod_PostingGroup))\\\n .withColumn(\"LinkValueEntry\",concat_ws(\"|\",pcml.DocumentNo_.cast('string'),pcml.LineNo_.cast('string'),to_date(pcml.PostingDate).cast('string')))\\\n .withColumn(\"Link_GD\",concat_ws(\"-\",pcml.DocumentNo_,pcml.LineNo_))\\\n .withColumn(\"LinkLocationCode\",when(col(\"LocationCode\")=='',\"NA\").otherwise(col(\"LocationCode\")))\\\n 
.withColumn(\"InvoicedQuantity\",when(col(\"Type\")==2,col(\"Quantity\")*(-1)))\\\n .withColumn(\"ServiceTaxAmount\",(pcml.ServiceTaxeCessAmount+pcml.ServiceTaxSHECessAmount+pcml.ServiceTaxAmount)*(-1))\\\n .withColumnRenamed(\"DimensionSetID\",\"DimSetID\").withColumnRenamed(\"DocumentNo_\",\"Document_No\").withColumnRenamed(\"LineNo_\",\"LineNo\")\\\n .withColumnRenamed(\"Description\",\"LineDescription\").withColumnRenamed(\"Quantity\",\"Trn_Quantity\")\\\n .withColumn(\"CrMemoAmount\",col(\"Amount\")*(-1))\\\n .withColumn(\"TaxAmount\",col(\"TaxAmount\")*(-1))\\\n .withColumn(\"ExciseAmount\",col(\"ExciseAmount\")*(-1))\\\n .withColumnRenamed(\"Inv_DiscountAmount\",\"InvDiscountAmount\")\\\n .withColumnRenamed(\"Buy-from Vendor No_\",\"BuyfromVendorNumber\")\n \n Line = Line.withColumn('PurchaseTaxAmount',Line['TaxAmount'])\n '''\n .withColumn(\"No\",when(pcml.Type=='1',pcml['No_'].cast('int')))\\\n .withColumn(\"GL_Link\",concat_ws('|',lit(entityLocation),pcml.Gen_Bus_PostingGroup.cast('string'), pcml.Gen_Prod_PostingGroup.cast('string')))\\\n .withColumn(\"LinkValueEntry\",concat_ws('|',pcml.DocumentNo_.cast('string'),pcml.LineNo_.cast('string'),to_date(pcml.PostingDate).cast('string')))\\\n .withColumn(\"Link_GD\",concat_ws('-',pcml.DocumentNo_.cast('string'),pcml.LineNo_.cast('string')))\\\n .withColumn(\"LinkLocationCode\",when(pcml.LocationCode=='',lit(\"NA\")).otherwise(pcml.LocationCode))\\\n .withColumn(\"CrMemoAmount\",col(\"Amount\")*(-1))\\\n .withColumn(\"TaxAmount\",col(\"TaxAmount\")*(-1))\\\n .withColumn(\"ExciseAmount\",col(\"ExciseAmount\")*(-1))\\\n .withColumnRenamed(\"Inv_DiscountAmount\",\"InvDiscountAmount\")\\\n .withColumnRenamed(\"Buy-from Vendor No_\",\"BuyfromVendorNumber\")\n '''\n #Line = udf.RENAME(Line,{\"DimensionSetID\":\"DimSetID\",\"DocumentNo_\":\"Document_No\",\"Amount\":\"PurchaseAmount\"})\n Line = Line.drop('PostingDate')\n \n Header = pcmh\\\n .withColumn(\"LinkVendor\",when(pcmh['Pay-toVendorNo_']=='','NA').otherwise(pcmh[\"Pay-toVendorNo_\"])).drop(\"Pay-toVendorNo_\",\"DBName\",\"EntityName\")\\\n .withColumn(\"PaytoName\",pcmh['Pay-toName']+\" \"+pcmh['Pay-toName2']).drop('Pay-toName','Pay-toName2')\\\n .withColumn(\"PaytoAddress\",pcmh['Pay-toAddress']+\" \"+pcmh['Pay-toAddress2']).drop('Pay-toAddress','Pay-toAddress2')\\\n .withColumn(\"ShiptoName\",pcmh['Ship-toName']+\" \"+pcmh['Ship-toName2']).drop('Ship-toName','Ship-toName2')\\\n .withColumn(\"ShiptoAddress\",pcmh['Ship-toAddress']+\" \"+pcmh['Ship-toAddress2']).drop('Ship-toAddress','Ship-toAddress2')\\\n .withColumn(\"LinkDate\",to_date(pcmh.PostingDate))\\\n .withColumn(\"LinkPurchaseRep\",when(pcmh.PurchaserCode=='','NA').otherwise(pcmh.PurchaserCode)).drop(\"PurchaserCode\")\\\n .withColumn(\"BuyfromVendorName\",pcmh[\"Buy-fromVendorName\"]+\" \"+pcmh[\"Buy-fromVendorName2\"]).drop(\"Buy-fromVendorName\",\"Buy-fromVendorName2\")\\\n .withColumn(\"Years\",when(month(pcmh.PostingDate)>=MnSt,year(pcmh.PostingDate)).otherwise(year(pcmh.PostingDate)-1))\\\n .withColumn(\"Quarters\",when(month(pcmh.PostingDate)>=MnSt,concat(lit('Q'),(quarter(pcmh.PostingDate)-1))).otherwise(lit('Q4')))\\\n .withColumn(\"MonthNum\", month(pcmh.PostingDate))\\\n .withColumnRenamed(\"No_\",\"Purchase_No\").withColumnRenamed(\"Pay-toCity\",\"PaytoCity\").withColumnRenamed(\"Ship-toCode\",\"ShiptoCode\")\\\n .withColumnRenamed(\"Ship-toCity\",\"ShiptoCity\").withColumnRenamed(\"OnHold\",\"HoldStatus\")\\\n .withColumnRenamed(\"LocationCode\",\"HeaderLocationCode\")\\\n 
.withColumnRenamed(\"Buy-fromCity\",\"BuyfromCity\").withColumnRenamed(\"Pay-toPostCode\",\"PaytoPostCode\")\\\n .withColumnRenamed(\"Ship-toPostCode\",\"ShiptoPostCode\")\n ''' \n .withColumn(\"LinkVendor\",when(pcmh['Pay-toVendorNo_']=='',lit(\"NA\")).otherwise(pcmh[\"Pay-toVendorNo_\"])).drop(\"Pay-toVendorNo_\")\\\n .withColumn(\"MonthNum\", month(pcmh.PostingDate))\\\n .withColumn(\"LinkPurchaseRep\",when(pcmh.PurchaserCode=='',lit(\"NA\")).otherwise(pcmh.PurchaserCode)).drop(\"PurchaserCode\")\\\n '''\n #Header = udf.RENAME(Header,{\"No_\":\"Purchase_No\",\"LocationCode\":\"HeaderLocationCode\"})\n\n Monthsdf = udf.MONTHSDF(sqlCtx)\n mcond = [Header.MonthNum==Monthsdf.MonthNum1]\n Header = udf.LJOIN(Header,Monthsdf,mcond)\n Header = Header.drop('MonthNum','MonthNum1')\n \n GL_Master = gps.withColumn(\"GL_LinkDrop\",concat_ws('|',lit(entityLocation),gps.Gen_Bus_PostingGroup,gps.Gen_Prod_PostingGroup)).drop('Gen_Bus_PostingGroup','Gen_Prod_PostingGroup')\\\n .withColumn(\"GLAccount\",when(gps.Purch_Account=='',0).otherwise(gps.Purch_Account)).drop('Purch_Account')\n \n data=[{'GLCategory':'Purchase Trading','FromGL':402000,'ToGL':406500}]\n GLRange = sqlCtx.createDataFrame(data)\n \n cond = [Line.Document_No == Header.Purchase_No]\n Purchase = udf.LJOIN(Line,Header,cond)\n\n cond1 = [Purchase.GL_Link == GL_Master.GL_LinkDrop]\n Purchase = udf.LJOIN(Purchase,GL_Master,cond1)\n Purchase = Purchase.drop(\"GL_LinkDrop\",\"GL_Link\")\n \n Range='1=1'\n Range1='1=1'\n NoOfRows=GLRange.count()\n for i in range(0,NoOfRows):\n if i==0:\n Range = (Purchase.GLAccount>=GLRange.select('FromGL').collect()[0][\"FromGL\"])\\\n & (Purchase.GLAccount<=GLRange.select('ToGL').collect()[0][\"ToGL\"])\n \n Range1 = (Purchase.No>=GLRange.select('FromGL').collect()[0][\"FromGL\"])\\\n & (Purchase.No<=GLRange.select('ToGL').collect()[0][\"ToGL\"])\n else:\n Range = (Range) | (Purchase.GLAccount>=GLRange.select('FromGL').collect()[i][\"FromGL\"])\\\n & (Purchase.GLAccount<=GLRange.select('ToGL').collect()[i][\"ToGL\"])\n \n Range1 = (Range1) | (Purchase.No>=GLRange.select('FromGL').collect()[i][\"FromGL\"])\\\n & (Purchase.No<=GLRange.select('ToGL').collect()[i][\"ToGL\"])\n Purchase = Purchase.filter(((Purchase.Type!=2) | ((Purchase.Type==2) & (Range))) & ((Purchase.Type!=1) | ((Purchase.Type==1) & (Range1))))\n Purchase = Purchase.withColumn(\"PurchaseAccount\",when(Purchase.Type==2,Purchase.GLAccount).otherwise(when(Purchase.Type==1,Purchase.No).otherwise(lit(1))))\n #Purchase.filter(Purchase['Document_No']=='PCR2152122-008').show()\n #sys.exit()\n pld = pld.filter((pld.Type == 2) & (pld.DocumentType==2) & (pld.Tax_ChargeType==0))\n LineDetails = pld.withColumn(\"Link_GDDrop\",concat_ws('-',pld.InvoiceNo_,pld.LineNo_)).drop(\"InvoiceNo_\",\"LineNo_\")\\\n .withColumnRenamed(\"AccountNo_\",\"AccountNo\")\n Range2 = LineDetails['AccountNo']!=0\n NoOfRows = GLRange.count()\n for j in range(0,NoOfRows):\n if i==0:\n FromGL = \"%s\"%GLRange.select(GLRange.FromGL).collect()[0]['FromGL']\n ToGL = \"%s\"%GLRange.select(GLRange.ToGL).collect()[0]['ToGL']\n Range2 = (LineDetails['AccountNo']>=FromGL) & (LineDetails['AccountNo']<=ToGL)\n else:\n FromGL = \"%s\"%GLRange.select(GLRange.FromGL).collect()[i]['FromGL']\n ToGL = \"%s\"%GLRange.select(GLRange.ToGL).collect()[i]['ToGL']\n Range2 = (LineDetails['AccountNo']>=FromGL) & (LineDetails['AccountNo']<=ToGL)\n LineDetails = LineDetails[Range2]\n LineDetails=LineDetails.groupby('Link_GDDrop').sum('Amount').withColumnRenamed('sum(Amount)','ChargesfromVendor')\n \n cond2 
= [Purchase.Link_GD == LineDetails.Link_GDDrop]\n Purchase = udf.LJOIN(Purchase,LineDetails,cond2)\n Purchase = Purchase.drop(\"Link_GDDrop\")\n\n Purchase = Purchase.withColumn('TransactionType',lit('CrMemo'))\n #Purchase.filter(Purchase['Document_No']=='PCR2152122-008').show()\n #sys.exit()\n\n Purchase = Purchase\\\n .withColumn('GLAccountNo',when(Purchase.GLAccount.isNull(), Purchase.No).otherwise(Purchase.GLAccount))\\\n .withColumn('PayableAmount',when((Purchase.CurrencyFactor==0) | (Purchase.CurrencyFactor.isNull()),when(Purchase.ChargesfromVendor.isNull(),Purchase.CrMemoAmount)\\\n .otherwise(Purchase.CrMemoAmount+Purchase.ChargesfromVendor)).otherwise(when(Purchase.ChargesfromVendor.isNull(),(Purchase.CrMemoAmount/Purchase.CurrencyFactor))\\\n .otherwise(((Purchase.CrMemoAmount+Purchase.ChargesfromVendor)/(Purchase.CurrencyFactor)))))\n \n Purchase = Purchase.withColumn('LinkLocation',when(Purchase.LinkLocationCode.isNull(),Purchase.HeaderLocationCode).otherwise(Purchase.LinkLocationCode))\n Purchase = Purchase.drop('PurchaseAccount')\n Purchase = Purchase.withColumn('LinkDate',to_date(Purchase['PostingDate']))\n FieldName = \"CostAmount(Expected)\"\n '''\n Field = sqlctx.read.parquet(hdfspath+\"/Data/FieldSelection\")\n Field = Field.filter(Field['Flag'] == 1).filter(Field['TableName'] == 'VE FieldSelection')\n Field = Field.select(\"FieldType\",\"Flag\",\"TableName\")\n FieldName = Field.select('FieldType').collect()[0][\"FieldType\"]\n FieldName = re.sub('[\\s+]', '', FieldName)\n '''\n ValueEntry = VE.withColumn(\"LinkDate\",to_date(VE.PostingDate))\\\n .withColumn(\"LinkValueEntry1\",concat_ws(\"|\",VE.DocumentNo_,VE.DocumentLineNo_,to_date(VE.PostingDate).cast('string')))\\\n \n ValueEntry = udf.RENAME(ValueEntry,{\"DimensionSetID\":\"DimSetID\",\"ItemNo_\":\"LinkItem\",\"DocumentNo_\":\"DocumentNo\",\"ItemLedgerEntryNo_\":\"LinkILENo\"})\n \n if FieldName=='CostAmount(Expected)':\n ValueEntry = ValueEntry.withColumn(\"CostAmount\",when(ValueEntry.CostAmountActual==0,ValueEntry.CostAmountExpected*(-1)).otherwise(ValueEntry.CostAmountActual*(-1)))\n else:\n ValueEntry = ValueEntry.withColumn(\"CostAmount\",ValueEntry[FieldName]*(-1))\n \n ValueEntry1 = ValueEntry.select('LinkValueEntry1','CostAmount').groupby('LinkValueEntry1')\\\n .agg(sum(\"CostAmount\").alias(\"CostAmount\"))\n \n JoinCOGS = [Purchase.LinkValueEntry == ValueEntry1.LinkValueEntry1]\n Purchase = udf.LJOIN(Purchase,ValueEntry1,JoinCOGS)\n Purchase = Purchase.drop(\"CrMemoAmount\",\"LinkValueEntry1\")\n\n Documents = Purchase.select('Document_No').distinct()\n Documents = Documents.withColumn('SysDocFlag',lit(1)).withColumnRenamed('Document_No','Document_NoDrop')\n \n PurchaseInvQuery=\"(SELECT * FROM purchase.purchaseinvoice) AS PurchaseInv\"\n PurchaseInvoice = sqlCtx.read.format(\"jdbc\").options(url=postgresUrl, dbtable=PurchaseInvQuery, user=PostgresDbInfo.props[\"user\"], password=PostgresDbInfo.props[\"password\"] ,driver= PostgresDbInfo.props[\"driver\"]).load()\n\n DocumentsCrMemo = PurchaseInvoice.select(\"Document_No\").distinct()\n DocumentsCrMemo = DocumentsCrMemo.withColumnRenamed(\"Document_No\",\"Document_NoDrop\").withColumn(\"SysDocFlag\",lit(2))\n Documents=Documents.unionAll(DocumentsCrMemo)\n \n JoinDocs = [ValueEntry.DocumentNo == Documents.Document_NoDrop]\n ValueEntry = udf.LJOIN(ValueEntry,Documents,JoinDocs)\n ValueEntry = ValueEntry.drop(\"Document_NoDrop\")\n \n SysEntries = Purchase.select('LinkValueEntry').distinct()\n SysEntries = 
SysEntries.withColumn('SysValueEntryFlag',lit(1))\n JoinSysEntry = [ValueEntry.LinkValueEntry1 == SysEntries.LinkValueEntry]\n ValueEntry = udf.LJOIN(ValueEntry,SysEntries,JoinSysEntry)\n ValueEntry = ValueEntry.drop(\"LinkValueEntry\").withColumnRenamed(\"LinkValueEntry1\",\"LinkValueEntry\")\n\n ValueEntry = ValueEntry.filter(((ValueEntry.SysDocFlag==1)&(ValueEntry.SysValueEntryFlag.isNull()))|((ValueEntry.SysDocFlag.isNull())&(ValueEntry.ItemLedgerEntryType==1)))\n \n ValueEntry = ValueEntry.withColumnRenamed('DocumentNo','Document_No').withColumnRenamed('DocumentLineNo_','LineNo')\\\n .withColumnRenamed('EntryNo_','Link_GD').withColumn('Trn_Quantity',ValueEntry['InvoicedQuantity'])\\\n .withColumn('RevenueType',lit('OTHER')).withColumn('TransactionType',lit('RevaluationandRemainingEntries'))\\\n .withColumnRenamed('LocationCode','LinkLocation')\n \n dse = sqlCtx.read.parquet(hdfspath + \"/DSE\").drop(\"DBName\",\"EntityName\")\n dse = udf.RENAME(dse,{\"DimensionSetID\":\"DimSetID1\"})\n\n scond = [Purchase.DimSetID==dse.DimSetID1]\n Purchase = udf.LJOIN(Purchase,dse,scond)\n Purchase = Purchase.drop('DimSetID1')\n\n vcond = [ValueEntry.DimSetID==dse.DimSetID1]\n ValueEntry = udf.LJOIN(ValueEntry,dse,vcond)\n\n ValueEntry = ValueEntry.withColumn(\"PostingDate\",col(\"PostingDate\").cast('string'))\\\n .withColumn(\"LinkDate\",col(\"LinkDate\").cast('string')).drop('DimSetID1')\n \n Purchase = Purchase.withColumn(\"PostingDate\",col(\"PostingDate\").cast('string'))\\\n .withColumn(\"ExpectedReceiptDate\",col(\"ExpectedReceiptDate\").cast('string'))\\\n .drop('LocationCode','Amount','ServiceTaxSHECessAmount','Sell-toCustomerName2','GLCode','Link_GDDrop')\n #Purchase.filter(Purchase['Document_No']=='PCR2152122-008').show()\n #sys.exit()\n Purchase = udf.CONCATENATE(Purchase,ValueEntry,spark)\n\n Purchase = Purchase.withColumnRenamed('LinkPurchaseRep','LinkPurchaser')\n \n Purchase = Purchase.drop('BudColumn')\n #//////////////////////////Writing data//////////////////////////\n \n Purchase = Purchase.withColumn('DBName',lit(DBName)).withColumn('EntityName',lit(EntityName))\n Purchase = Purchase.withColumn('LinkVendorKey',concat_ws('|',Purchase.DBName,Purchase.EntityName,Purchase.LinkVendor))\\\n .withColumn('LinkDateKey',concat_ws('|',Purchase.DBName,Purchase.EntityName,Purchase.LinkDate))\\\n .withColumn('LinkPurchaserKey',concat_ws('|',Purchase.DBName,Purchase.EntityName,Purchase.LinkPurchaser))\\\n .withColumn('LinkItemKey',concat_ws('|',Purchase.DBName,Purchase.EntityName,Purchase.LinkItem))\\\n .withColumn('LinkLocationKey',concat_ws('|',Purchase.DBName,Purchase.EntityName,Purchase.LinkLocation))\n PurchaseInv = udf.CONCATENATE(Purchase,PurchaseInvoice,spark)\n PurchaseInv = PurchaseInv.na.fill({\"PayableAmount\":0})\n \n PurchaseInv.cache()\n\n PurchaseInv.write.jdbc(url=postgresUrl, table=\"Purchase.purchase\", mode='overwrite', properties=PostgresDbInfo.props)\n \n logger.endExecution()\n \n try:\n IDEorBatch = sys.argv[1]\n except Exception as e :\n IDEorBatch = \"IDLE\"\n\n log_dict = logger.getSuccessLoggedRecord(\"Purchase.PurchaseCreditMemo\", DBName, EntityName, Purchase.count(), len(Purchase.columns), IDEorBatch)\n log_df = spark.createDataFrame(log_dict, logger.getSchema())\n log_df.write.jdbc(url=PostgresDbInfo.logsDbUrl, table=\"logtable\", mode='append', properties=PostgresDbInfo.props)\n \n except Exception as ex:\n exc_type,exc_value,exc_traceback=sys.exc_info()\n print(\"Error:\",ex)\n print(\"type - \"+str(exc_type))\n print(\"File - 
\"+exc_traceback.tb_frame.f_code.co_filename)\n print(\"Error Line No. - \"+str(exc_traceback.tb_lineno))\n\n logger.endExecution()\n\n try:\n IDEorBatch = sys.argv[1]\n except Exception as e :\n IDEorBatch = \"IDLE\"\n \n log_dict = logger.getErrorLoggedRecord('Purchase.PurchaseCreditMemo', DBName, EntityName, str(ex), str(exc_traceback.tb_lineno), IDEorBatch)\n log_df = spark.createDataFrame(log_dict, logger.getSchema())\n log_df.write.jdbc(url=PostgresDbInfo.logsDbUrl, table=\"logtable\", mode='append', properties=PostgresDbInfo.props)\n print('purchase_PurchaseCreditMemo completed: ' + str((dt.datetime.now()-st).total_seconds()))\n \nif __name__ == \"__main__\":\n sqlCtx, spark = udf.getSparkConfig(SPARK_MASTER, \"Stage2:PurchaseCreditMemo\")\n purchase_PurchaseCreditMemo(sqlCtx, spark)","sub_path":"Linux/Navision2013/DB/Entity/Stage2/Script/Purchase/PurchaseCreditMemo.py","file_name":"PurchaseCreditMemo.py","file_ext":"py","file_size_in_byte":20785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"179447673","text":"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"trax trainer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport gin\nfrom tensor2tensor.trax import trax\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"dataset\", None, \"Which dataset to use.\")\nflags.DEFINE_string(\"model\", None, \"Which model to train.\")\nflags.DEFINE_string(\"data_dir\", None, \"Path to the directory with data.\")\nflags.DEFINE_string(\"output_dir\", None,\n \"Path to the directory to save logs and checkpoints.\")\nflags.DEFINE_multi_string(\"config_file\", None,\n \"Configuration file with parameters (.gin).\")\nflags.DEFINE_multi_string(\"config\", None,\n \"Configuration parameters (gin string).\")\n\n\ndef _setup_gin():\n configs = FLAGS.config or []\n # Override with --dataset and --model\n if FLAGS.dataset:\n configs.append(\"train.dataset='%s'\" % FLAGS.dataset)\n if FLAGS.model:\n configs.append(\"train.model=@\" + FLAGS.model)\n gin.parse_config_files_and_bindings(FLAGS.config_file, configs)\n\n\ndef _default_output_dir():\n \"\"\"Default output directory.\"\"\"\n dir_name = \"{model_name}_{dataset_name}_{timestamp}\".format(\n model_name=gin.query_parameter(\"train.model\").configurable.name,\n dataset_name=gin.query_parameter(\"train.dataset\"),\n timestamp=datetime.datetime.now().strftime(\"%Y%m%d_%H%M\"),\n )\n dir_path = os.path.join(\"~\", \"trax\", dir_name)\n print()\n trax.log(\"No --output_dir specified\")\n return dir_path\n\n\ndef main(_):\n _setup_gin()\n\n # Setup directories\n data_dir = FLAGS.data_dir\n output_dir = FLAGS.output_dir or _default_output_dir()\n assert data_dir, \"Must specify a data directory\"\n assert output_dir, \"Must specify an output directory\"\n 
trax.log(\"Using --output_dir %s\" % output_dir)\n\n data_dir = os.path.expanduser(data_dir)\n output_dir = os.path.expanduser(output_dir)\n\n trax.train(data_dir=data_dir, output_dir=output_dir)\n\n\nif __name__ == \"__main__\":\n logging.set_verbosity(logging.INFO)\n app.run(main)\n","sub_path":"tensor2tensor/trax/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"125875998","text":"class Stack:\r\n # Constructor that creates an empty stack (list).\r\n def __init__(self):\r\n self.myStack = []\r\n\r\n # Check if the stack is empty or not.\r\n def isEmpty(self):\r\n if len(self.myStack) == 0:\r\n return True\r\n else:\r\n return False\r\n\r\n # Push method to insert a value in stack.\r\n def push(self, value):\r\n self.myStack.append(value)\r\n\r\n # Pop method to remove last added value from stack.\r\n def pop(self):\r\n if self.isEmpty():\r\n return \"Stack is Empty\"\r\n else:\r\n self.myStack.pop()\r\n\r\n # Peek method to return the last added element in stack.\r\n def peek(self):\r\n if self.isEmpty():\r\n return \"Stack is Empty\"\r\n else:\r\n return self.myStack[-1]\r\n \r\n # clearStack method deletes all values from a stack\r\n def clearStack(self):\r\n self.myStack.clear()\r\n\r\n\r\nstack = Stack()\r\n\r\nstack.push(1)\r\nstack.push(2)\r\nstack.push(3)\r\nstack.push(4)\r\n\r\nprint(stack.peek())\r\n\r\nstack.pop()\r\n\r\nprint(stack.peek())\r\n\r\nstack.clearStack()\r\n\r\nprint(stack.peek())","sub_path":"Stack (using Python List and No Size Limit).py","file_name":"Stack (using Python List and No Size Limit).py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"23203269","text":"import pandas as pd\nimport json\nimport urllib.request\nfrom bs4 import BeautifulSoup as soup \n\ndef writeToJSONFile(path, fileName, data):\n df = pd.io.json.json_normalize(data)\n out = df.to_json(orient='records')\n filePathNameWExt = path + '/' + fileName + '.json'\n\n with open(filePathNameWExt, 'w') as fp:\n json.dump(data, fp)\n\n with open(filePathNameWExt, 'w') as f:\n f.write(out)\n\n with open('D:/MyCodes/Python/DataScraping/Seleksi-2018/Tugas1/data/notnormal.json', 'w') as p:\n json.dump(data, p)\n \n\ndef addDentist(data, ID, dentist):\n for i in data:\n if i == ID:\n return\n data[ID] = dentist\n\nif __name__ == '__main__':\n\n headers = {'user-agent' : 'Mozilla/5.0 (X11; Linux x86_64); Basis Data/Admin Basis Data/basisdata@std.stei.itb.ac.id'}\n\n page = 1\n ID = 0\n data = dict()\n \n dentist = dict()\n\n while page <= 9: \n my_url = 'https://www.alodokter.com/cari-dokter/dokter-gigi/page/'+str(page)\n req = urllib.request.Request(my_url, None, headers)\n response = urllib.request.urlopen(req)\n page_html = response.read()\n\n page_soup = soup(page_html, \"html.parser\")\n\n doclist = page_soup.findAll(\"div\", {\"class\": \"row row-doctor\"})\n \n for doc in doclist:\n ID += 1\n doctor_name = doc.findAll(\"div\", {\"class\": \"doctor-name\"})\n doctor_spec = doc.findAll(\"div\", {\"class\": \"doctor-speciality\"})\n findHospital = doc.findAll(\"div\", {\"class\": \"px16 doctor-address\"})\n findLocation = doc.findAll(\"div\", {\"class\": \"px16 doctor-desc\"})\n findTarif = doc.findAll(\"div\", {\"class\": \"tindakan-desc px16\"})\n name = doctor_name[0].text.strip()\n speciality = doctor_spec[0].text.strip()\n hospital = findHospital[0].text.strip()\n 
location = findLocation[0].text.strip()\n tarif = findTarif[0].text.strip()\n\n dentist = { \n \"Nama Dokter\" : name, \n \"Spesialis\" : speciality, \n \"Tempat Praktik\" : hospital, \n \"Lokasi\" : location,\n \"Tarif\" : tarif\n }\n addDentist(data, str(ID), dentist)\n\n print(\"=============================\")\n print(\"ID :\" + str(ID))\n print(\"Nama Dokter : \" + name)\n print(\"Spesialis : \" + speciality)\n print(\"Tempat praktik : \" + hospital)\n print(\"Lokasi : \" + location)\n print(\"Tarif : \" + tarif)\n page += 1\n writeToJSONFile('D:/MyCodes/Python/DataScraping/Seleksi-2018/Tugas1/data', 'JSONData', data)","sub_path":"Tugas1/src/Tugas1.py","file_name":"Tugas1.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"301327608","text":"'''\nWrite a program that opens the file “iniContoh.txt” and counts the number of words \nin that file. Store the result in a variable. \nThen, using Python, write a sentence into a new (.txt) file “jumlahKata.txt” \nwith the following content: \nJumlah kata yang terhitung pada file “iniContoh.txt” adalah (hasil) \n'''\n\ninfile = open('iniContoh.txt') \n\nisi = infile.read() \n\n#split the words and make it a list\nkata = isi.split() \n\n#count the length of the list\nhitung = len(kata) \n\n#write text into the file\nwith open(\"jumlahKata.txt\", \"w\") as isinya: # write the string into the file jumlahKata.txt\n isinya.write('Jumlah kata yang terhitung pada file “iniContoh.txt” adalah ' + str(hitung))\n\n\n\nprint(\"Lihat file jumlahKata.txt\")\n","sub_path":"Tutorial Lab/Lab02/Task 3.py","file_name":"Task 3.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"44899730","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Film, DodatkoweInfo, Ocena, Aktor\nfrom .forms import FilmForm, DodatkoweInfoForm, OcenaForm, AktorForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.forms import UserCreationForm\nimport os\nfrom rest_framework import viewsets\nfrom django.contrib.auth.models import User\nfrom .serializers import UserSerializer, FilmSerializer\n\nclass UserView(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\nclass FilmView(viewsets.ModelViewSet):\n queryset = Film.objects.all()\n serializer_class = FilmSerializer\n\n\n\ndef wszystkie_filmy(request):\n wszystkie = Film.objects.all()\n return render(request, 'filmy.html', {'filmy': wszystkie})\n\n@login_required\ndef nowy_film(request):\n form_film = FilmForm(request.POST or None, request.FILES or None)\n form_dodatkowe = DodatkoweInfoForm(request.POST or None)\n if all((form_film.is_valid(), form_dodatkowe.is_valid())):\n film = form_film.save(commit=False)\n dodatkowe = form_dodatkowe.save()\n film.dodatkowe = dodatkowe\n form_film.save()\n return redirect(wszystkie_filmy)\n context = {\n 'form': form_film,\n 'form_dodatkowe': form_dodatkowe,\n 'oceny': None,\n 'form_ocena': None,\n 'nowy': True,\n 'actor_form': None\n }\n return render(request, 'film_form.html', context)\n\n@login_required\ndef edytuj_film(request, id):\n film = get_object_or_404(Film, pk=id)\n oceny = Ocena.objects.filter(film=film)\n\n try:\n dodatkowe = 
DodatkoweInfo.objects.get(film=film.id)\n except DodatkoweInfo.DoesNotExist:\n dodatkowe = None\n\n form_film = FilmForm(request.POST or None, request.FILES or None, instance=film)\n form_dodatkowe = DodatkoweInfoForm(request.POST or None, instance=dodatkowe)\n form_ocena = OcenaForm(None)\n actor_form = AktorForm(request.POST or None)\n if request.method == 'POST':\n if 'gwiazdki' in request.POST:\n ocena = form_ocena.save(commit=False)\n ocena.film = film\n ocena.save()\n\n if all((form_film.is_valid(), form_dodatkowe.is_valid())):\n film = form_film.save(commit=False)\n dodatkowe = form_dodatkowe.save()\n film.dodatkowe = dodatkowe\n film.save()\n return redirect(wszystkie_filmy)\n\n\n if request.method == \"POST\":\n if 'imie' in request.POST:\n actor = actor_form.save()\n actor.filmy.add(film)\n return redirect(wszystkie_filmy)\n\n context = {\n 'form': form_film,\n 'form_dodatkowe': form_dodatkowe,\n 'oceny': oceny,\n 'form_ocena': form_ocena,\n 'nowy': False,\n 'actor_form': actor_form,\n\n }\n\n return render(request, 'film_form.html', context)\n\n@login_required\ndef delete_film(request, id):\n film = get_object_or_404(Film, pk=id)\n\n if request.method == \"POST\":\n film.delete()\n if film.plakat:\n if os.path.isfile(film.plakat.path):\n os.remove(film.plakat.path)\n return redirect(wszystkie_filmy)\n\n return render(request, 'potwierdz.html', {'film': film})\n\n\ndef film_detail(request, id):\n film = get_object_or_404(Film, pk=id)\n actors = film.aktorzy.all()\n oceny = Ocena.objects.filter(film=film)\n ocena_form = OcenaForm(request.POST or None)\n\n if request.method == \"POST\":\n if 'stars' in request.POST:\n ocena = ocena_form.save(commit=False)\n ocena.film = film\n ocena.save()\n return redirect(wszystkie_filmy)\n\n context = {\n 'film': film,\n 'oceny': oceny,\n 'ocena_form': ocena_form,\n 'actors': actors\n }\n return render(request, 'film_detail.html', context)\n\ndef register(request):\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=password)\n login(request, user)\n return redirect(wszystkie_filmy)\n return render(request, 'register.html', {'form': form})","sub_path":"filmy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"583916638","text":"import os\nimport json\n\n\nclass BlogInsert:\n\n def __init__(self, path, col, params, file_suffix):\n self.path = path\n self.col = col\n self.params = params\n self.file_suffix = file_suffix\n\n def trigger_import(self):\n files = self.get_json_files()\n\n for file in files:\n insert_list, update_list = self.get_json_data(file)\n self.bulk_insert(insert_list)\n self.update_blog(update_list)\n\n if os.path.exists(file):\n os.remove(file)\n\n def bulk_insert(self, recs):\n if recs:\n self.col.insert_many(recs)\n\n def update_blog(self, recs):\n for rec in recs:\n # data_dict = {\"story_id\": rec[\"story_id\"]}\n data_dict = {self.params: rec[self.params]}\n self.col.find_one_and_replace(data_dict, rec)\n\n def get_json_data(self, file):\n insert_list = []\n update_list = []\n\n with open(file) as json_file:\n recs = json.load(json_file)\n if isinstance(recs, list):\n for rec in recs:\n data_dict = {self.params: rec[self.params]}\n res = self.col.find(data_dict).count()\n if res:\n update_list.append(rec)\n else:\n 
insert_list.append(rec)\n\n return insert_list, update_list\n\n def get_json_files(self):\n list_files = os.listdir(self.path)\n json_list = []\n\n for item in list_files:\n if item.endswith(self.file_suffix):\n file_path = os.path.join(self.path, item)\n json_list.append(file_path)\n\n return json_list\n","sub_path":"blog_insert.py","file_name":"blog_insert.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"94597127","text":"import command_server\n\ndef info(*args):\n message = 'Йо, смотри пока как тут все устроено:\\n'\n for c in command_server.command_list:\n message += c.keys[0] + ' - ' + c.description + '\\n'\n return message, ''\n\ninfo_command = command_server.Command()\ninfo_command.keys = ['хелп', 'помощь', 'помоги', 'help']\ninfo_command.description = 'сопсно какие команды поддерживаются'\ninfo_command.process = info\n","sub_path":"commands/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"583368801","text":"#!/usr/bin/python\n# Copyright (C) 2011 McAfee, Inc. All rights reserved.\n# TestcaseID: 13238\n# TestcaseDescription: To verify that McAfeeSecurity.log file capture the detected infection information\n\nimport sys\nimport logging\nimport time\nimport subprocess\nimport os\n\n# Import CommonTest module into current namespace\ncommon_path=os.path.dirname(os.path.abspath(sys.argv[0]))\nsys.path.append(common_path + \"/../../Common\")\nsys.path.append(common_path + \"/../../AntiMalware\")\n\nimport commonFns\nimport CommonOASFns\nimport CommonAntiMalwareFns\n\n# Import CommonTest module into current namespace\nfrom CommonTest import *\n\nMAX_LIMIT = 100\n\n# Get testcase name\ntestcaseName = sys.argv[0][:-3]\n\nclass TestCase(BaseTest):\n _infectDirPath = common_path + \"/data/OAStestEicar\"\n _infectFilePath = common_path + \"/data/OAStestEicar/OASInfect.\"\n\n def __init__(self):\n logging.info(\"TestcaseID : 13238\")\n logging.info(\"To verify that McAfeeSecurity.log file capture the detected infection information\")\n\n def init(self):\n logging.info(\"Initializing testcase %s\" % testcaseName)\n\n # Call the common initialization check\n _retval = BaseTest.init(self)\n if _retval != 0 :\n return _retval\n\n logging.debug(\"Installing AntiMalwareTestTool\")\n if CommonAntiMalwareFns.installAntiMalwareTestTool() != True :\n logging.error(\"Failed to install AntiMalwareTestTool\")\n return 1\n\n CommonOASFns.resetToDefaults()\n return 0\n \n\n def execute(self):\n logging.info(\"Executing testcase %s\" % testcaseName)\n\n # AV-Primary Action - Notify\n if CommonOASFns.setAVPrimaryAction('0') != True:\n logging.error(\"Unable to set AV-PrimaryAction\")\n return 1\n\n if CommonOASFns.enableScanOnRead() !=True:\n logging.error(\"Unable to enable Scan On Read\")\n return 1 \n \n if CommonOASFns.disableScanOnWrite() !=True:\n logging.error(\"Unable to disable Scan On Write\")\n return 1\n\n time.sleep(5)\n #Step 1: Creating large number of infected files \n os.mkdir(self._infectDirPath)\n _i=0\n while _i < MAX_LIMIT :\n _i=_i+1\n if CommonOASFns.createEicarInfection(self._infectFilePath+str(_i)) == False:\n logging.error(\"Unable to create infected file.\")\n return 1\n\n return 0\n\n def verify(self):\n logging.info(\"Verifying testcase %s\" % testcaseName)\n #Step 2: Accessing the all infected file and verifying the McAfeeSecurity.log\n _i=0\n while _i < 
MAX_LIMIT :\n _i=_i+1\n _cmd = \"cat \" + self._infectFilePath+str(_i)\n _retVal = subprocess.call([\"/bin/sh\", \"-c\", _cmd],stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n #time.sleep(5)\n if _retVal != 0 :\n _regex=\"Infection found: \"+self._infectFilePath+str(_i)+\" is infected with EICAR test file, type of infection\"\\\n +\" is Test-Virus\\\(NOT A VIRUS\\\), accessed by cat, action taken is Notified\"\n if commonFns.searchProductLog(_regex) != True:\n logging.error(\"Regex is not matching with Notified action\")\n return 1\n logging.info(\"All infections successfully notified in McAfeeSecurity.log\")\n return 0\n\n def cleanup(self):\n logging.info(\"Performing cleanup for testcase %s\" % testcaseName)\n # Copy logs.\n foundCrash = 0\n foundCrash = commonFns.copyLogs()\n # Set Product Preferences to Defaults\n CommonOASFns.resetToDefaults()\n # Clean the logs\n commonFns.cleanLogs()\n\n # Remove all the infected files (directory) from the disk\n logging.debug(\"Removing %s from disk\" % self._infectDirPath)\n\n _cmd=\"rm -rf \"+ self._infectDirPath\n _rt=subprocess.call([_cmd],shell=True)\n if _rt != 0 :\n logging.error(\"Failed to remove %s from filesystem\"%self._infectDirPath)\n return 1\n\n if foundCrash != 0:\n logging.error(\"copylogs returned failure status. Maybe a product crash\")\n\n return foundCrash\n\n def __del__(self):\n pass\n\nif __name__ == \"__main__\":\n # Setup testcase\n setupTestcase(sys.argv)\n\n testObj = TestCase()\n\n # Perform testcase operations\n retVal = testObj.init()\n\n # Perform execute once initialization succeeds...\n if(retVal == 0):\n retVal = testObj.execute()\n\n # Once execution succeeds, perform verification...\n if(retVal == 0):\n retVal = testObj.verify()\n\n # Perform testcase cleanup\n retVal += testObj.cleanup()\n\n if(retVal == 0):\n resultString = \"PASS\"\n else:\n resultString = \"FAIL\"\n\n logging.info(\"Result of testcase %s: %s\" % (testcaseName, resultString) )\n sys.exit(retVal)\n","sub_path":"McAfee/src/TestAutomation/Testcases/FVT/AntiMalware/OAS/OAS_OAS_1.py","file_name":"OAS_OAS_1.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"567961842","text":"# this is to learn different ways of creating fibonacci number/list\n\n# if only need to get one number\n# recursive way (recursion)\ndef fib_gen_r(i):\n \"\"\"\n Fibonacci function generator\n generate the fibonacci number at 'i'th position\n \"\"\"\n if i == 0:\n return 0\n elif i == 1:\n return 1\n else:\n return fib_gen_r(i - 1) + fib_gen_r(i - 2)\n\nif __name__ == '__main__':\n print(fib_gen_r(7)) # >>> 13\n print()\n\n\n# [0, 1, 1, 2, 3, 5, 8, 13]\n# 0 1 2 3 4 5 6 7\n\n# non-recursive way\n# actually non-recursive way is much more efficient!!!\ndef fib_gen_nr(i):\n \"\"\"\n Fibonacci function generator\n generate the fibonacci number at 'i'th position\n \"\"\"\n lst = [0, 1]\n if i == 0:\n return lst[0]\n elif i == 1:\n return lst[1]\n elif i >= 2:\n for j in range(2, i+1):\n temp = lst[j-1] + lst[j-2]\n lst.append(temp)\n return lst[-1]\n\nif __name__ == '__main__':\n print(fib_gen_nr(7)) # >>> 13\n print()\n\n\n\n# if need to output a list of fibonacci numbers till 'i'th position\n# only have Non-recursive way\n# just need to revise the nr method slightly\ndef fiblist_gen_nr(i):\n \"\"\"\n Fibonacci function generator\n generate the fibonacci list till 'i'th position\n \"\"\"\n lst = [0, 1]\n if i == 0:\n return lst[0:1]\n elif i == 1:\n return lst[0:2]\n elif i 
>= 2:\n for j in range(2, i+1):\n temp = lst[j-1] + lst[j-2]\n lst.append(temp)\n return lst\n\nif __name__ == '__main__':\n print(fiblist_gen_nr(7)) # >>> [0, 1, 1, 2, 3, 5, 8, 13]\n print()\n\n# 新写法\n\ndef fib_gen_nr2(i):\n a, b = 0, 1\n for x in range(i):\n a, b = b, a+b # 必须是同排,这样会调用上一次的a,b值,a值不会由于a=b后改变!\n return a\n\nif __name__ == '__main__':\n print(fib_gen_nr2(7)) # >>> 13\n print()\n\n\ndef fiblist_gen_nr2(i):\n lst = [0]\n a, b = 0, 1\n for x in range(i):\n a, b = b, a+b\n lst.append(a)\n return lst\n\nif __name__ == '__main__':\n print(fiblist_gen_nr2(7)) # >>> [0, 1, 1, 2, 3, 5, 8, 13]\n print()\n\n\n\n# use generator in fibonacci function\ndef genFib(): # this created a generator for fibonacci numbers\n a, b = 0, 1\n yield a\n yield b\n while True:\n next = a + b\n yield next\n a, b = b, next\n\n# print a list of any length for fibonacci numbers\ndef fib_gen_nr3(x):\n \"\"\"x is the threshold to stop when a fib number is larger than it\"\"\"\n for n in genFib():\n if n >= x:\n break\nif __name__ == '__main__':\n fib_gen_nr3(13)\n# >>>\n# 0\n# 1\n# 1\n# 2\n# 3\n# 5\n# 8\n# 13\n","sub_path":"ZCodeSnippets/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"549868308","text":"import numpy as np\r\nimport json\r\nimport operator\r\n\r\n#class_num = 345\r\nclass_num = 50\r\ntr_arr = [200, 200, 200, 200, 200]\r\nts_arr = [50, 50, 50, 50, 50]\r\ntr_mrg = [0, 250, 500, 750, 1000, 1250]\r\nts_mrg = [200, 450, 700, 950, 1200]\r\n\r\npath = \"class_index.json\"\r\ndataset_dir_path = \"../../quickdraw/numpy_bitmap/\"\r\nwith open(path, mode=\"r\") as f:\r\n meta = json.load(f)\r\n\r\ntrain_data = [0]*len(tr_arr)\r\ntrain_labels = [0]*len(tr_arr)\r\neval_data = [0]*len(tr_arr)\r\neval_labels = [0]*len(tr_arr)\r\ndiv_arr = np.frompyfunc(operator.truediv,2,1)\r\n\r\nfor num in range(0, len(tr_arr)):\r\n train_data[num] = np.zeros((class_num,tr_arr[num],784), dtype=np.float32)\r\n train_labels[num] = np.zeros((class_num,tr_arr[num]), dtype=np.int32)\r\n eval_data[num] = np.zeros((class_num,ts_arr[num],784), dtype=np.float32)\r\n eval_labels[num] = np.zeros((class_num,ts_arr[num]), dtype=np.int32)\r\n\r\nfor idx in range(0, class_num):\r\n file_name = meta[idx]\r\n datas = np.load(dataset_dir_path+file_name)\r\n for num in range(0, len(tr_arr)):\r\n train_data[num][idx] = div_arr(datas[tr_mrg[num]:ts_mrg[num]], 255.0)\r\n train_labels[num][idx] = np.full((tr_arr[num]), fill_value=idx)\r\n eval_data[num][idx] = div_arr(datas[ts_mrg[num]:tr_mrg[num+1]], 255.0)\r\n eval_labels[num][idx] = np.full((ts_arr[num]), fill_value=idx)\r\n print(file_name)\r\n\r\nfor num in range(0, len(tr_arr)):\r\n dataset_dir = \"c50/dataset\"+str(num)+\"/\"\r\n np.save(dataset_dir+\"train_data.npy\", train_data[num].reshape((class_num*tr_arr[num],784)))\r\n np.save(dataset_dir+\"train_labels.npy\", train_labels[num].reshape((class_num*tr_arr[num])))\r\n np.save(dataset_dir+\"eval_data.npy\", eval_data[num].reshape((class_num*ts_arr[num],784)))\r\n np.save(dataset_dir+\"eval_labels.npy\", eval_labels[num].reshape((class_num*ts_arr[num])))\r\n","sub_path":"generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"425774659","text":"from django.db import reset_queries\nfrom django.http import request\nfrom django.shortcuts import render\nfrom 
django.utils import timezone\n#modelsの前の. = 同じディレクトリ 拡張子(.py)は必要ない\nfrom .models import Post\n# Page Not Found 404 ページ\nfrom django.shortcuts import render, get_object_or_404\n# 投稿ページ\nfrom .forms import PostForm\n# 新しく作成された投稿ページを表示\nfrom django.shortcuts import redirect\n\n# Create your views here.\n\n#post_listという関数を作成 引数はrequest\ndef post_list(request):\n #クエリセットを作成 変数はposts\n #公開したBLOG記事を並べ替え\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n return render(request, 'blog/post_list.html', {'posts': posts})\n \n#リクエストされたpk(プライマリキー)のページを表示する \ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n\n# 新しい投稿ページを表示する\ndef post_new(request):\n# 空白のフォームか、フォームデータが入力された状態かで分岐する\n# 『POST』は送られてきたものを意味する変数\n if request.method == \"POST\":\n form = PostForm(request.POST)\n # 必須フィールドがきちんと入力されているかチェック\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm()\n return render(request, 'blog/post_edit.html', {'form': form})\n\ndef post_edit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm(instance=post)\n return render(request, 'blog/post_edit.html', {'form':form})\n ","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"71817303","text":"## https://qedinsight.wordpress.com/2011/05/13/the-coconut-problem/\r\n\r\ndef process(x):\r\n original = x\r\n for i in range(5):\r\n if x % 5 == 1:\r\n x = ((x-1)/5)*4\r\n else:\r\n return False\r\n if x % 5 == 1:\r\n print (\"{} works!\".format(original))\r\n return True\r\n else:\r\n return False\r\n\r\n\r\na = 1\r\n\r\nwhile True:\r\n p = process(a)\r\n if p:\r\n break\r\n a += 1\r\n","sub_path":"Python/Sailors Coconuts Monkey problem/SailorsCoconutsMonkey.py","file_name":"SailorsCoconutsMonkey.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"230929960","text":"from pippi import dsp\nfrom pippic import settings\nfrom pippic import rt\nfrom termcolor import colored\nimport multiprocessing as mp\nimport json\nimport time\n\n\"\"\"\nregister: low - high\ndensity: thin - thick\nharmonicity: aharmonic - harmonic\nroughness: smooth - rough\npace: slow - fast\n\"\"\"\n\ndsp.log('loaded orange bot')\n\ndef mc(r, numpoints):\n numlands = dsp.randint(5, 20)\n return dsp.breakpoint([ dsp.rand(r[0], r[1]) for i in range(numlands) ], numpoints)\n\ndef make_section(zone):\n numpoints = dsp.randint(20, 50)\n\n zone['register'] = mc(zone['register'], numpoints)\n zone['density'] = mc(zone['density'], numpoints)\n zone['harmonicity'] = mc(zone['harmonicity'], numpoints)\n zone['roughness'] = mc(zone['roughness'], numpoints)\n zone['pace'] = mc(zone['pace'], numpoints)\n\n return [ make_point(zone, i) for i in range(numpoints) ]\n\ndef make_point(zone, i):\n return {\n 'name': zone['name'],\n 'register': zone['register'][i],\n 'density': zone['density'][i],\n 'harmonicity': 
zone['harmonicity'][i],\n 'roughness': zone['roughness'][i],\n 'pace': zone['pace'][i],\n }\n\ndef make_telemetry():\n zones = [\n {\n 'name': ['sparse'],\n 'register': (7, 10),\n 'density': (1, 2),\n 'harmonicity': (1, 10),\n 'roughness': (1, 5),\n 'pace': (1, 3)\n }, {\n 'name': ['gentle'],\n 'register': (3, 7),\n 'density': (3, 8),\n 'harmonicity': (8, 10),\n 'roughness': (1, 2),\n 'pace': (1, 3)\n }, {\n 'name': ['ballsout'],\n 'register': (1, 10),\n 'density': (2, 10),\n 'harmonicity': (1, 10),\n 'roughness': (4, 10),\n 'pace': (3, 10)\n }, {\n 'name': ['upbeat'],\n 'register': (5, 8),\n 'density': (1, 8),\n 'harmonicity': (8, 10),\n 'roughness': (2, 4),\n 'pace': (4, 7)\n }\n ]\n\n dsp.log('generating telemetry...')\n\n numsections = dsp.randint(10, 20)\n sections = []\n\n for s in range(numsections):\n zone = dsp.randchoose(zones)\n section = make_section(zone)\n sections += section\n\n # Transition\n if dsp.rand(0, 100) > 20:\n next_zone = dsp.randchoose(zones)\n\n if next_zone['name'] != zone['name']:\n next_section = make_section(next_zone)\n\n transition_zone = {\n 'name': ['transition', section[-1]['name'][0], next_section[0]['name'][0]],\n 'register': (section[-1]['register'], next_section[0]['register']),\n 'density': (section[-1]['density'], next_section[0]['density']),\n 'harmonicity': (section[-1]['harmonicity'], next_section[0]['harmonicity']),\n 'roughness': (section[-1]['roughness'], next_section[0]['roughness']),\n 'pace': (section[-1]['pace'], next_section[0]['pace']),\n }\n\n transition_section = make_section(transition_zone)\n\n sections += transition_section\n sections += next_section\n\n dsp.log('telemetry generated')\n settings.shared('tel', sections)\n\ndef run(gens, tick):\n dsp.log('telemetry up!')\n started = time.time()\n\n def worker(gens, tick):\n while time.time() < started + (60 * 12.5):\n dsp.delay(dsp.stf(dsp.rand(2, 20)))\n\n if dsp.rand(0, 100) > 60:\n v = dsp.rand(0, 100)\n\n if v > 65:\n voice_id, generator_name = settings.add_voice('ch re qu')\n dsp.log('')\n dsp.log('starting chirp voice %s' % voice_id)\n elif v > 35:\n voice_id, generator_name = settings.add_voice('bi re qu')\n dsp.log('')\n dsp.log('starting bird voice %s' % voice_id)\n else:\n voice_id, generator_name = settings.add_voice('bo re qu')\n dsp.log('')\n dsp.log('starting boone voice %s' % voice_id)\n\n\n playback_process = mp.Process(name=voice_id, target=rt.out, args=(gens[generator_name], tick))\n playback_process.start()\n\n dsp.delay(dsp.stf(dsp.rand(6, 35)))\n\n dsp.log('')\n dsp.log('stopping voice %s' % voice_id)\n settings.voice(voice_id, 'loop', 0)\n\n for w in range(10):\n # Spawn worker\n worker_process = mp.Process(name='worker', target=worker, args=(gens, tick))\n worker_process.start()\n\ndef show_telemetry(tel):\n output = [] \n\n for k, v in tel.iteritems():\n if k == 'register':\n color = 'blue'\n\n elif k == 'density':\n color = 'red'\n\n elif k == 'harmonicity':\n color = 'green'\n\n elif k == 'roughness':\n color = 'cyan'\n\n elif k == 'pace':\n color = 'yellow'\n\n else:\n color = 'white'\n\n if k == 'name':\n output += [ colored(' '.join(v), color) ]\n else:\n output += [ colored('%s: %.2f' % (k[:3], v), color) ]\n\n output = ' | '.join(output)\n dsp.log(output)\n\ndef getTel():\n tel = json.loads(settings.shared('tel'))\n count = int(settings.shared('count'))\n tel = tel[count % len(tel)]\n\n return 
tel\n","sub_path":"bots/orange.py","file_name":"orange.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"173546733","text":"import numpy as np\nimport sympy as sy\nimport scipy as sp\n\nclass SkeletonOfBeam:\n \"\"\"\n Get skeleton of beam\n\n Arributes:\n mesh: Input trimesh.mesh\n centroid: An array of the coordinates of mesh's centroid\n nVec: Initial skeleton vector\n \n Intersections: A list of trimesh.Path3D\n SkeletonPoints: A list of np.array\n \n XYZCoordinate: A new coordinate which use nVec as the x-axis\n XYProjections: projections of SkeletonPoints on the x-y plane of XYZCoordinate\n XZProjections: projections of SkeletonPoints on the x-z plane of XYZCoordinate\n \n Used packages:\n import numpy as np\n import sympy as sy\n import scipy as sp\n \"\"\"\n mesh = None\n centroid = None\n nVec = None\n \n IntersectionScale = None # scale along the nVec to get Intersections\n Intersections = []\n Intersections2D = []\n SkeletonPoints = [] # centroids of the intersections\n \n XYZCoordinate = None # 3*3 array, first line = x-axis ...\n XYProjections = []\n XZProjections = []\n coorOrigin = None\n \n L = None\n \n u_xyPlane = None\n u_xzPlane = None\n alpha_xyPlane = None\n alpha_xzPlane = None\n # Sympy derivation of the projection of skeleton curve on x-y plane\n # Calculate the derivation value by using dudx_xyPlane.evalf(subs={'xi': xi_value})\n # where xi_value in [0, 1]\n dudx_xyPlane = None\n dudx_xzPlane = None # similar to dudx_xyPlane\n \n def __init__(self, mesh, rough_normalVector):\n self.mesh = mesh\n self.centroid = mesh.centroid.copy()\n # 质心截面的一个大致的法向量(目前单指 [1, 0, 0])\n self.nVec = np.asarray(rough_normalVector)\n \n \n def getScaleAlongSkeletonVec(self):\n \"\"\"Get the scale along the input skeleton vector, which then serve for taking intersections\n \"\"\"\n currentScale = 0.2\n \n # Get a rough scale\n ifSliceExist = True \n while ifSliceExist:\n originPoint = self.centroid + currentScale * self.nVec\n slice = self.mesh.section(plane_origin=originPoint, plane_normal=self.nVec) # take the slice\n if slice is None:\n ifSliceExist = False\n else:\n currentScale = currentScale * 2\n \n # Get the accurate sacles\n scales = [0, 0]\n \n # Binary Search for positive half\n leftS = currentScale / 2\n rightS = currentScale\n while rightS - leftS > 1e-2:\n midS = (leftS + rightS) / 2\n originPoint = self.centroid + midS * self.nVec\n slice = self.mesh.section(plane_origin=originPoint, plane_normal=self.nVec)\n if slice is None: rightS = midS\n else: leftS = midS\n scales[1] = leftS\n \n # Binary Search for negative half\n leftS = -currentScale\n rightS = -currentScale / 2\n while rightS - leftS > 1e-2:\n midS = (leftS + rightS) / 2\n originPoint = self.centroid + midS * self.nVec\n slice = self.mesh.section(plane_origin=originPoint, plane_normal=self.nVec)\n if slice is None: leftS = midS\n else: rightS = midS\n scales[0] = rightS\n \n self.IntersectionScale = np.asarray(scales)\n \n \n \n def getIntersectionsFromStep(self, step=1, valve=1e-3):\n \"\"\"Get the intersections of beam along nVec\n \n :param step: interval of intersections\n \"\"\"\n sections = []\n extents = self.IntersectionScale # 截取区间\n levels = np.arange(*extents, step=step) # 每隔 1m 截一次\n for i in range(len(levels)):\n origin_temp = self.centroid.copy()\n origin_temp[0] = origin_temp[0] + levels[i]\n try:\n slice = self.mesh.section(plane_origin=origin_temp, plane_normal=self.nVec)\n # 
选取每个截面图中面积最大的子图,实现初步去噪\n if slice is not None:\n slice_2D, to_3D = slice.to_planar()\n slices_splited = slice_2D.split()\n sliceIndex = np.argmax([s.area for s in slices_splited])\n slice_2D = slices_splited[sliceIndex]\n if slice_2D.area > valve:\n sections.append(slice_2D.to_3D(to_3D))\n except:\n pass\n \n self.Intersections = sections\n \n def getIntersectionsFromSliceNum(self, sliceNum=5, valve=1e-3):\n \"\"\"Get the intersections of beam along nVec\n \n :param sliceNum: number of intersections\n \"\"\"\n sections = []\n extents = self.IntersectionScale # 截取区间\n levels = np.linspace(*extents, sliceNum) # 每隔 1m 截一次\n for i in range(len(levels)):\n origin_temp = self.centroid.copy()\n origin_temp[0] = origin_temp[0] + levels[i]\n try:\n slice = self.mesh.section(plane_origin=origin_temp, plane_normal=self.nVec)\n # 选取每个截面图中面积最大的子图,实现初步去噪\n if slice is not None:\n slice_2D, to_3D = slice.to_planar()\n slices_splited = slice_2D.split()\n sliceIndex = np.argmax([s.area for s in slices_splited])\n slice_2D = slices_splited[sliceIndex]\n if slice_2D.area > valve:\n sections.append(slice_2D.to_3D(to_3D))\n except:\n pass\n \n self.Intersections = sections\n \n \n def getSkeletonPoints(self): \n \"\"\"Get the centroids of intersections\n \"\"\"\n self.SkeletonPoints = []\n for s in self.Intersections:\n self.SkeletonPoints.append(s.centroid)\n \n def getNewCoordinate(self):\n \"\"\"Get new XYZ coordinate with vector nVec as x-axis\n \"\"\"\n R = GeometryToolBox.rotation_matrix_from_vectors([1, 0, 0], self.nVec)\n x = self.nVec\n y = np.dot(R, np.array([0, 1, 0]))\n z = np.dot(R, np.array([0, 0, 1]))\n self.XYZCoordinate = np.vstack((x, y, z))\n return self.XYZCoordinate\n \n def getProjections(self):\n \"\"\"Get the projections of the SkeletonPoints on x-y, x-z plane\n \"\"\" \n x, y, z = self.XYZCoordinate\n origin = self.SkeletonPoints[0]\n self.coorOrigin = origin\n self.XYProjections = [GeometryToolBox.projected_point(p, origin, x, y) for p in self.SkeletonPoints]\n self.XZProjections = [GeometryToolBox.projected_point(p, origin, x, z) for p in self.SkeletonPoints]\n \n \n def getSkeletonEqs(self):\n \"\"\"Get the equation of the skeleton curve in x-y, x-z plane\n \"\"\"\n xs = np.array(self.XYProjections)[:,0]\n ys = np.array(self.XYProjections)[:,1]\n zs = np.array(self.XZProjections)[:,1]\n\n L = xs[-1] - xs[0]\n self.L = L\n xis = (xs - xs[0]) / L\n\n errorValue = lambda x,y,A: y - np.dot(A, x)\n a_init = np.array([1] * 4)\n\n # Calculate the derivation equation on x-y plane\n # Get the optimal parameters using least squre error method\n a1 = sp.optimize.leastsq(errorValue, a_init, args=(ys, self._H(xis, L)))[0]\n self.alpha_xyPlane = a1\n \n # Derivation\n xi = sy.symbols('xi')\n self.u_xyPlane = (self._H(xi, L, ifsymbol=True) * a1).sum()\n \n # Then calculate the derivation equation on x-z plane\n a2 = sp.optimize.leastsq(errorValue, a_init, args=(zs, self._H(xis, L)))[0]\n self.alpha_xzPlane = a2\n self.u_xzPlane = (self._H(xi, L, ifsymbol=True) * a2).sum()\n \n \n def getDerivativeSkeletonEqs(self):\n \"\"\"Get the derivation of the skeleton curve in x-y, x-z plane\n \"\"\"\n xs = np.array(self.XYProjections)[:,0]\n L = xs[-1] - xs[0]\n \n # Derivation\n xi = sy.symbols('xi')\n self.dudx_xyPlane = sy.diff(self.u_xyPlane, xi) / L\n \n # Then calculate the derivation equation on x-z plane\n self.dudx_xzPlane = sy.diff(self.u_xzPlane, xi) / L\n \n def getNewSkeletonPoints(self):\n \"\"\"Get new centroids according to the curve equations\n \"\"\"\n xVec, yVec, zVec = 
self.XYZCoordinate\n        xs = np.array(self.XYProjections)[:,0]\n        L = xs[-1] - xs[0]\n        xis = xs / L\n\n        self.SkeletonPoints = []\n        for i in range(len(xis)):\n            xi_value = xis[i]\n            sX = xs[i]\n            sY = self.u_xyPlane.evalf(subs={'xi': xi_value})\n            sZ = self.u_xzPlane.evalf(subs={'xi': xi_value})\n            self.SkeletonPoints.append(self.coorOrigin + sX*xVec + sY*yVec + sZ*zVec) # coordinate of the bar origin is added\n        \n    def getNewIntersections(self):\n        \"\"\"Get the intersections of the beam along the skeleton tangent at each point\n        \"\"\"\n        sections = []\n        sections2D = []\n        xs = np.array(self.XYProjections)[:,0]\n        L = xs[-1] - xs[0]\n        xis = xs / L\n        R = self.XYZCoordinate.T\n        if len(xis) != len(self.SkeletonPoints):\n            raise Exception(\"Conflict between xis and SkeletonPoints.\", self.SkeletonPoints)\n        for i in range(len(xis)):\n            xi = xis[i]\n            normalVec = self.returnTangentVectorAtXi(xi)\n            originPoint = self.SkeletonPoints[i]\n            \n            # define transform manually\n            T = np.zeros((4,4))\n            T[:3,:3] = R[:,np.array([1,2,0])]; # 3rd axis is reduced by projection\n            T[:3,3] = originPoint; \n            T[3,3] = 1.0\n            to_2D = (np.linalg.inv(T)).astype(float)\n            \n            try:\n                slice = self.mesh.section(plane_origin=originPoint, plane_normal=normalVec)\n                # keep the largest-area piece of each slice as a first denoising pass\n                if slice is not None:\n                    slice_2D, to_3D = slice.to_planar(to_2D=to_2D,check=False)\n                    slices_splited = slice_2D.split()\n                    sliceIndex = np.argmax([s.area for s in slices_splited])\n                    slice_2D = slices_splited[sliceIndex]\n                    if True: #slice_2D.area > 1e-1:\n                        sections2D.append(slice_2D)\n                        sections.append(slice_2D.to_3D(to_3D))\n            except:\n                pass\n        \n        self.Intersections2D = sections2D\n        self.Intersections = sections    \n        \n    def returnTangentVectorAtXi(self, xi_value):\n        \"\"\"Return the tangent vector at xi=xi_value\n        \"\"\"\n        xVec, yVec, zVec = self.XYZCoordinate\n\n        sY = self.dudx_xyPlane.evalf(subs={'xi': xi_value}) # scalar y\n        sZ = self.dudx_xzPlane.evalf(subs={'xi': xi_value}) # scalar z\n        \n        tanVec = xVec + sY*yVec + sZ*zVec\n        \n        return tanVec.astype(np.float64)\n    \n    def returnSkeletonPointsInXiRange(self, xi_s, xi_e, pNum=10):\n        points = []\n        \n        xVec, yVec, zVec = self.XYZCoordinate\n        xs = np.array(self.XYProjections)[:,0]\n        L = xs[-1] - xs[0]\n        \n        xis = np.linspace(xi_s, xi_e, pNum)\n        xs = xis * L\n        for i in range(len(xis)):\n            xi_value = xis[i]\n            sX = xs[i]\n            sY = self.u_xyPlane.evalf(subs={'xi': xi_value})\n            sZ = self.u_xzPlane.evalf(subs={'xi': xi_value})\n            points.append(sX*xVec + sY*yVec + sZ*zVec)\n        \n        return np.asarray(points).astype(np.float64)\n    \n    def returnAvgIntersectionArea(self):\n        return 0\n    \n    \n    \n    def showIntersections(self, ifOverlapped=True):\n        \"\"\"Visualize the intersections in one graph\n        params:\n            ifOverlapped: if True, all intersections will be plotted into one graph\n                          else False, each intersection will be plotted in order\n        \"\"\"\n        if ifOverlapped:\n            combined = np.sum(self.Intersections2D)\n            combined.show()\n        else:\n            for c2 in self.Intersections2D:\n                c2.show()\n        \n    \n    def _H(self, xs, L, ifsymbol=False): \n        h2 = 1 - 3*xs**2 + 2*xs**3\n        h3 = xs*(1 - 2*xs + xs**2)*L\n        h5 = xs**2*(3 - 2*xs)\n        h6 = xs**2*(xs - 1)*L\n        if ifsymbol:\n            return np.array([h2, h3, h5, h6])\n        else:\n            return np.hstack((h2.reshape(len(xs), -1), h3.reshape(len(xs), -1), \n                              h5.reshape(len(xs), -1), h6.reshape(len(xs), -1)))\n    \n    def H(xs, L, ifsymbol=False): \n        h2 = 1 - 3*xs**2 + 2*xs**3\n        h3 = xs*(1 - 2*xs + xs**2)*L\n        h5 = xs**2*(3 - 2*xs)\n        h6 = xs**2*(xs - 1)*L\n        if ifsymbol:\n            return np.array([h2, h3, h5, h6])\n        else:\n            return 
np.hstack((h2.reshape(len(xs), -1), h3.reshape(len(xs), -1), \n                              h5.reshape(len(xs), -1), h6.reshape(len(xs), -1)))\n    \n    \n#----------------------------------------------------------------------------------------------------\n\nclass GeometryToolBox:\n    def rotation_matrix(vS, vE):\n        \"\"\"Rotation matrix that takes unit vector vS onto vE (Rodrigues' formula)\n        \"\"\"\n        vS = np.asarray(vS) / np.linalg.norm(vS) # normalize\n        vE = np.asarray(vE) / np.linalg.norm(vE)\n        axis = np.cross(vS, vE)\n        theta = np.arccos(np.dot(vS, vE))\n\n        a = np.cos(theta / 2.0)\n        b, c, d = -axis * np.sin(theta / 2.0)\n        aa, bb, cc, dd = a*a, b*b, c*c, d*d\n        bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n\n        return np.array([[aa+bb-cc-dd, 2*(bc + ad), 2*(bd - ac)],\n                         [2*(bc - ad), aa+cc-bb-dd, 2*(cd + ab)],\n                         [2*(bd + ac), 2*(cd - ab), aa+dd-bb-cc]])\n    \n    def rotation_matrix_from_vectors(vec1, vec2):\n        a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)\n        v = np.cross(a, b)\n        c = np.dot(a, b)\n        s = np.linalg.norm(v)\n        if s > 1e-15:\n            kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n            rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))\n        else:\n            rotation_matrix = np.eye(3)\n        return rotation_matrix\n\n\n    def projected_point(point, plane_origin, planeVec1, planeVec2):\n        \"\"\"\n        Given an origin in a plane and two orthogonal vectors spanning it, return the 2D coordinates of a point projected onto that plane\n        \"\"\"\n        pVec = np.asarray(point) - np.asarray(plane_origin)\n        xVec = np.asarray(planeVec1) / np.linalg.norm(planeVec1)\n        yVec = np.asarray(planeVec2) / np.linalg.norm(planeVec2)\n        s1 = np.dot(pVec, xVec)\n        s2 = np.dot(pVec, yVec)\n\n        return [s1, s2]\n    \n#----------------------------------------------------------------------------------------------------\n\nclass SegsOfSOB:\n    mSob = None\n    mesh = None    \n    n = None\n    \n    aYs = []\n    aZs = []\n    \n    nSobs = []\n    \n    us_xyPlane = []\n    us_xzPlane = []\n    dudxs_xyPlane = []\n    dudxs_xzPlane = []\n    \n    def __init__(self, motherSob, n):\n        self.mSob = motherSob\n        self.mesh = motherSob.mesh.copy()\n        self.n = n\n        \n    def getSobs(self):\n        xVec, yVec, zVec = self.mSob.XYZCoordinate\n        xis = np.linspace(0, 1, 10)\n        segL = self.mSob.L / self.n\n        for i in range(self.n):\n            print(\"Getting {}th sub-beam's Info...\".format(i))\n            sY = self.dudxs_xyPlane[i].evalf(subs={'xi': 0}) # scalar y\n            sZ = self.dudxs_xzPlane[i].evalf(subs={'xi': 0}) # scalar z\n            nVec = xVec + sY*yVec + sZ*zVec\n\n            sp = []\n            for j in range(len(xis)):\n                xi_value = xis[j]\n                sX = xi_value * segL + segL * i\n                sY = self.us_xyPlane[i].evalf(subs={'xi': xi_value})\n                sZ = self.us_xzPlane[i].evalf(subs={'xi': xi_value})\n                sp.append((sX*xVec + sY*yVec + sZ*zVec).astype('float'))\n            \n            sob = SkeletonOfBeam(self.mesh, np.array(nVec).astype('float'))\n            sob.SkeletonPoints = sp\n            \n            print(\"Constructing {}th sub-beam...\".format(i))\n            sob.getNewCoordinate()\n            sob.getProjections()\n            sob.getSkeletonEqs()\n            sob.getDerivativeSkeletonEqs()\n\n            sob.getNewSkeletonPoints()\n            sob.getNewIntersections()\n            print(\"Done.\")\n            self.nSobs.append(sob)\n            \n    def getNewA(self):\n        n = self.n\n        m = 4*n + 1\n        sob = self.mSob\n\n        L = self.mSob.L\n        u_xy = self.mSob.u_xyPlane\n        u_xz = self.mSob.u_xzPlane\n\n        # Get global xi\n        if m > 2:\n            dis = 1/(m-1)\n            xis_global = [(i+1)*dis for i in range(m-2)]\n            xis_global = [0] + xis_global + [1]\n\n        # Get local xi\n        # and indexes for separations\n        xis_local = (np.array(xis_global) % (1/n)) / (1/n)\n        xis_local[-1] = 1\n        separateIndex = [0]\n        for i in range(len(xis_local)):\n            if i == 0: pass\n            else:\n                if xis_local[i] <= xis_local[i-1]:\n                    separateIndex.append(i)\n        separateIndex.append(-1)\n\n        # Get u in y 
and z\n uYs = []\n uZs = []\n for xi_value in xis_global:\n uY = u_xy.evalf(subs={'xi': xi_value})\n uZ = u_xz.evalf(subs={'xi': xi_value})\n uYs.append(uY)\n uZs.append(uZ)\n\n # Get N\n N = np.zeros((m, 2*(n+1)))\n segL = L/n\n for i in range(len(separateIndex)-1): \n xis = xis_local[separateIndex[i]:separateIndex[i+1]]\n if separateIndex[i+1] == -1:\n xis = np.append(xis, xis_local[-1])\n\n if separateIndex[i+1] == -1:\n N[separateIndex[i]:, i*2:] = SkeletonOfBeam.H(xis, segL)\n else:\n N[separateIndex[i]:separateIndex[i+1], i*2:i*2+4] = SkeletonOfBeam.H(xis, segL)\n \n a_init = np.array([1] * 2*(n+1))\n errorValue = lambda x,y,A: y - np.dot(A, x)\n \n self.aYs = sp.optimize.leastsq(errorValue, a_init, args=(np.asarray(uYs).astype('float'), np.asarray(N).astype('float')))[0]\n self.aZs = sp.optimize.leastsq(errorValue, a_init, args=(np.asarray(uZs).astype('float'), np.asarray(N).astype('float')))[0]\n # self.aYs = np.dot(np.linalg.pinv(N), uYs).astype('float')\n # self.aZs = np.dot(np.linalg.pinv(N), uZs).astype('float')\n \n def getNewEqs(self):\n self.us_xyPlane = []\n self.us_xzPlane = []\n xi = sy.symbols('xi')\n segL = self.mSob.L / self.n\n for i in range(self.n):\n if i == self.n - 1:\n aY = self.aYs[i*2:]\n aZ = self.aZs[i*2:]\n else:\n aY = self.aYs[i*2:i*2+4]\n aZ = self.aZs[i*2:i*2+4]\n self.us_xyPlane.append((SkeletonOfBeam.H(xi, segL, ifsymbol=True) * aY).sum())\n self.us_xzPlane.append((SkeletonOfBeam.H(xi, segL, ifsymbol=True) * aZ).sum())\n \n def getNewDerivativeEqs(self):\n xi = sy.symbols('xi')\n segL = self.mSob.L / self.n\n self.dudxs_xyPlane = [sy.diff(u, xi) / segL for u in self.us_xyPlane]\n self.dudxs_xzPlane = [sy.diff(u, xi) / segL for u in self.us_xzPlane]","sub_path":"_ZK/.ipynb_checkpoints/SkeletonOfBeam-checkpoint.py","file_name":"SkeletonOfBeam-checkpoint.py","file_ext":"py","file_size_in_byte":19502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"470528558","text":"#!/usr/bin/env python\n#\n# Copyright @2014 blackshirtmuslim@yahoo.com\n# Licensed: see Python license\n\n\"\"\"Main entry for Application\"\"\"\n\nimport os\nimport uuid\nimport base64\n\nimport tornado.web\nimport tornado.wsgi\nimport tornado.ioloop\nimport tornado.httpserver\n\nfrom tornado.options import define, options\n\nfrom dompetku.handler import transaksi, login, register, home, user\nfrom dompetku.handler import services\n\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\n\n\nclass Application(tornado.wsgi.WSGIApplication):\n def __init__(self):\n handler = [\n (r\"/\", home.HomeHandler),\n (r\"/trans\", transaksi.ListTransaksiHandler),\n (r\"/trans/all\", transaksi.TransaksiHandler),\n (r\"/transactions\", transaksi.ListTrans),\n (r\"/services/trans\", services.ApiTransactions),\n (r\"/trans/([0-9]*)\", transaksi.TransaksiByIdHandler),\n (r\"/trans/create\", transaksi.CreateTransaksiHandler),\n (r\"/trans/insert\", transaksi.InsertTransaksiHandler),\n (r\"/trans/([0-9]*)/edit\", transaksi.EditTransaksiHandler),\n (r\"/trans/([0-9]*)/delete\", transaksi.DeleteTransaksiHandler),\n (r\"/auth/login\", login.LoginHandler),\n (r\"/auth/logout\", login.LogoutHandler),\n (r\"/user/([0-9]*)/edit\", user.UserHandler),\n (r\"/register\", register.RegistrasiHandler),\n (r\"/api/check/user\", login.CheckUserExistHandler),\n (r\"/api/check/user/available\", login.CheckIfUserAvailable),\n (r\"/api/check/password\", login.CheckPasswordHandler),\n ]\n\n settings = dict(\n blog_title=\"Your online Pocket\",\n 
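# template and static dirs resolve relative to this module's directory\n            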
template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n            static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n            # ui_modules={\"Entry\": EntryModule},\n            xsrf_cookies=False,\n            cookie_secret=base64.b64encode(uuid.uuid4().bytes + uuid.uuid4().bytes),\n            login_url=\"/auth/login\",\n            debug=True,\n        )\n\n        tornado.wsgi.WSGIApplication.__init__(self, handler, **settings)\n\n        # Have one global connection to the blog DB across all handlers\n        # self.db = model.database\n\n\n# Issue : locale.Error: locale query failed\n# based on this suggestion http://code.google.com/p/python-for-android/issues/detail?id=1\n# and this snippet from http://code.google.com/p/python-for-android/issues/attachmentText?id=1&aid=8862727350419203445&\n# name=monkey_locale.py&token=ABZ6GAcpG3dWuh_F9FOJ5TnNxsz3o_XeGA%3A1411486676371\ndef patch_locale():\n    # noinspection PyUnusedLocal,PyUnusedLocal,PyUnusedLocal\n    def getlocale(*args, **kwargs):\n        return None, None\n\n    import locale\n\n    locale.getlocale = getlocale\n\n\ndef main():\n    tornado.options.parse_command_line()\n    http_server = tornado.httpserver.HTTPServer(Application())\n    http_server.listen(options.port)\n    tornado.ioloop.IOLoop.instance().start()\n\n\napplication = Application()\n\nif __name__ == \"__main__\":\n    patch_locale()\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"530496152","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport py_compile\npy_compile.compile('./new_image_plugin.py')\n\nfrom gimpfu import *\n\n# https://www.gimp.org/docs/python/index.html#INTRODUCTION\n\n# GIMP procedural database (PDB) docs:\n# http://oldhome.schmorp.de/marc/pdb/index.html\n\n# https://wiki.python.org/moin/PythonMagick\n\n# plugins location on Mac OS X:\n# /Applications/GIMP-2.10.app/Contents/Resources/lib/gimp/2.0/plug-ins\n\ndef erstelle_neues_image(image, ebene):\n    width = 600\n    height = 400\n\n    # Create a new image.\n    image = pdb.gimp_image_new(width, height, RGB)\n    # Create at least one layer for the image and add it to the image.\n    ebene = pdb.gimp_layer_new(\n        image, width, height, RGB, \"ebenen-name\", 100, NORMAL_MODE)\n    pdb.gimp_image_insert_layer(image, ebene, None, 1)\n    # Pick a background color and fill the image with it.\n    pdb.gimp_context_set_background((255,138,0))\n    pdb.gimp_drawable_fill(ebene, BACKGROUND_FILL)\n    # Display the newly created image.\n    pdb.gimp_display_new(image)\n\n\nregister(\n    \"erstelle_neues_image\", # plugin name\n    \"Neues Image erstellen\", # short description\n    \"Ein neues Image erstellen\", # longer description\n    \"Mele Melewo\", # plugin author\n    \"MIT-Lizenz\", # license information\n    \"2017\", # year of publication\n    \"/Filters/Neues Image\", # menu position and label\n    \"*\", # accepted image types\n    [], # input parameters\n    [], # output results\n    erstelle_neues_image # name of the function\n    )\n\nmain()","sub_path":"thomas/gimp/new_image_plugin.py","file_name":"new_image_plugin.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"192756402","text":"from wtforms import SubmitField\nfrom search_plugins.search_plugin_manager import FormPlugin, HiddenFieldPlugin, StringFieldPlugin, RadioFieldPlugin, \\\n    CheckboxFieldPlugin\n\n\nclass PluginForm(FormPlugin):\n    query = StringFieldPlugin('Search', id='search-plugin-query')\n    
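# hidden field that scopes the search to one school item via the match/where statements below\n    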
school = HiddenFieldPlugin(match_stmt=\"(item)-[:IN_SCHOOL]->(school:Item)\",\n where_stmt=\"school.id = {school}\")\n level = RadioFieldPlugin(match_stmt=\"(item)-[:UOG_LEVEL]->(level:Item {id:{level}})\",\n where_stmt=\"level.id = {level}\",\n label='Level',\n choices=[(18327237, \"Level 1\"),\n (18327238, \"Level 2\"),\n (18327239, \"Level 3\"),\n (18327241, \"Level 4\"),\n (18327243, \"Level 5\")],\n coerce=unicode,\n )\n submit = SubmitField('Search')\n\n def __init__(self, *args, **kwargs):\n super(FormPlugin, self).__init__(*args, **kwargs)\n self.item_id_field = self.school\n self.checkbox_fields = [getattr(self, field, None) for field in dir(self)\n if isinstance(getattr(self, field), CheckboxFieldPlugin)]\n self.radio_fields = [getattr(self, field, None) for field in dir(self)\n if isinstance(getattr(self, field), RadioFieldPlugin)]\n self.hidden_fields = [getattr(self, field, None) for field in dir(self) if\n isinstance(getattr(self, field), HiddenFieldPlugin) and not\n getattr(self, field) == self.item_id_field]\n","sub_path":"search_plugins/uog/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"468855076","text":"import time\r\nfrom math import sin, log\r\nfrom local_settings import *\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium import webdriver\r\n\r\n\r\nlink = \"http://suninjuly.github.io/explicit_wait2.html\"\r\n\r\ntry:\r\n browser = webdriver.Firefox(\r\n executable_path=ep, \r\n firefox_binary=fb\r\n )\r\n browser.get(link)\r\n button = WebDriverWait(browser, 12).until(\r\n EC.text_to_be_present_in_element((By.ID, \"price\"), '100')\r\n ) \r\n browser.find_element_by_id(\"book\").click()\r\n button = browser.find_element_by_id(\"solve\")\r\n browser.execute_script(\"return arguments[0].scrollIntoView(true);\", button)\r\n x = int(browser.find_element_by_id(\"input_value\").text)\r\n res = str(log(abs(12*sin(x))))\r\n browser.find_element_by_id(\"answer\").send_keys(res)\r\n xp = '//button[contains(text(), \"Submit\")]'\r\n button = browser.find_element_by_xpath(xp)\r\n button.click() \r\n a = browser.switch_to.alert\r\n print(a.text)\r\nfinally:\r\n time.sleep(5)\r\n browser.quit()","sub_path":"stepikMod2.py","file_name":"stepikMod2.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"406331341","text":"\"\"\"Sample LSTM Code\n\nThis script runs a generic LSTM network on the features in your training data and\nmakes predictions based on your test data. 
It will output a model, and save and\nload your model.\n\n * add_layer - adds a layer to the model\n * build - builds a model when passed an array of layers\n * load - loads an existing model and weights\n * run - uses above functions to build a model on training data and saves it to file\n\"\"\"\n\nimport numpy as np\nimport process\nimport tensorflow as tf\n\nfrom ann_visualizer.visualize import ann_viz\nfrom functools import reduce\nfrom keras import backend as K, regularizers\nfrom keras.callbacks import TensorBoard\nfrom keras.layers import LSTM, Dense, InputLayer, Dropout, Flatten\nfrom keras.metrics import categorical_accuracy\nfrom keras.models import Sequential, model_from_json\nfrom keras.optimizers import SGD, Adam\nfrom math import ceil\nfrom time import time\n\ndef add_layer(model, nodes):\n activation = 'relu' if nodes != 1 else 'linear'\n name = 'Layer{layer_num}'.format(layer_num=len(model.layers))\n with tf.name_scope(name):\n layer = Dense(\n units=nodes,\n activation=activation,\n ) if len(model.layers) else LSTM(nodes,\n input_shape=(1, nodes),\n return_sequences=True,\n go_backwards=True,\n dropout=0.3,\n recurrent_dropout=0.3,\n )\n\n needs_flatten = not len(model.layers)\n model.add(layer)\n if needs_flatten:\n model.add(Flatten())\n\n return model\n\n\ndef build(layers, predict_value):\n model = Sequential()\n\n model = reduce(add_layer, layers, model)\n loss = 'mean_squared_error' if predict_value else 'binary_crossentropy'\n metrics = ['accuracy'] if predict_value else [categorical_accuracy]\n with tf.name_scope(\"loss\"):\n optimizer = Adam(lr=0.001)\n model.compile(optimizer=optimizer,\n loss=loss, metrics=metrics)\n\n return model\n\ndef load(json_file='model_lstm.json', h5_file='model_lstm.h5'):\n json = open(json_file, 'r')\n model_json = json.read()\n json.close()\n\n model = model_from_json(model_json)\n model.load_weights(h5_file)\n\n with tf.name_scope(\"loss\"):\n optimizer = Adam(lr=0.001)\n model.compile(optimizer=optimizer,\n loss='mean_squared_error', metrics=['accuracy'])\n\n return model\n\ndef run(data=None, predict_value=False, epochs=5):\n \"\"\"Trains a LSTM network on inputed data\n Parameters\n ----------\n data : DataSet\n DataSet object filled with scaled and split data ready for consumption\n predict_values : Boolean\n Set to true if you want to predict the value of the stock or false if you want to categorize it\n epochs : Int\n How many epochs you want the network to run\n Returns\n -------\n keras.engine.sequential.Sequential\n A keras model we can evaluate and make predictions on\n \"\"\"\n if data is None:\n data = process.run(None, 'MSFT', predict_value)\n\n K.clear_session()\n\n num_features = data.X_train.shape[1]\n first_layer = ceil(num_features / 2)\n second_layer = ceil(first_layer / 2)\n\n model = build([num_features, 1], predict_value)\n\n tensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()),\n histogram_freq=1,\n batch_size=5,\n write_graph=True,\n write_grads=True,\n write_images=False,\n embeddings_freq=0,\n embeddings_layer_names=None,\n embeddings_metadata=None\n )\n\n y_labelled = data.y_train * 1\n print(model.summary())\n\n # reshape according to number of timesteps (1 in this case)\n data.group_timesteps(1)\n\n if (predict_value):\n model.fit(\n data.X_train,\n y_labelled,\n batch_size=1,\n epochs=epochs,\n validation_split=0.3,\n shuffle=False,\n callbacks=[tensorboard],\n )\n else:\n model.fit(\n data.X_train,\n y_labelled,\n batch_size=1,\n epochs=epochs,\n validation_split=0.3,\n shuffle=False,\n 
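# classification path: weight classes to offset label imbalance\n            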
class_weight=data.class_weights(),\n callbacks=[tensorboard],\n )\n\n # ann_viz(model, view=True, filename=\"network.gv\", title=\"MyNeural Network\")\n\n json = model.to_json()\n with open(\"model_lstm.json\", \"w\") as file:\n file.write(json)\n model.save_weights(\"model_lstm.h5\")\n\n return model\n","sub_path":"lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"253715705","text":"\"\"\"This file collects some parameters for the controller of the quad.\nThe parameters are not defined directly here, but they are defined in a launch\nfile.\nThis file takes out the parameters from the launch file, but also gives a\ndefault value for each parameter.\nIn this way, ROS is able to change the values of the parameters online, if\ndesired.\nFor example, we do such changes in the GUI.\n\"\"\"\n\nimport rospy\nimport numpy as np\n\n\n# gravity\ngravity = rospy.get_param(\"gravity_ctr\", 9.81)\n\n# quad mass\nquad_mass = rospy.get_param(\"quad_mass_ctr\", 1.442)\n\n# vertical\ne3 = np.array([0.0, 0.0, 1.0])\n\n# controller gains\nomega_n = 1.0\nxi = np.sqrt(2) / 2\nkv = rospy.get_param(\"kv\", 2.0 * xi * omega_n)\nkp = rospy.get_param(\"kp\", omega_n**2)\n\n# controller thresholds\nsp = rospy.get_param(\"sp\", 1.0)\nsv = rospy.get_param(\"sv\", 1.0)\n\n# angular velocity sensitivity for the acro mode\nacro_rpp = rospy.get_param(\"acro_rpp_ctr\", 4.5)\n\n# yaw gain\nk_yaw = rospy.get_param(\"k_yaw_ctr\", 3.0)\n\n# maximum roll and pitch angle\nmax_tilt_deg = rospy.get_param(\"max_tilt_deg_ctr\", 45.0)\n\n# maximum yaw rate\nmax_yaw_rate_deg = rospy.get_param(\"max_yaw_rate_deg_ctr\", 200.0)\n\n# neutral control\nneutral_throttle = 1484\nneutral_cmd = (1500, 1500, neutral_throttle, 1500)\n\n# conventional initial position of a quad\ninit_pos = np.array([0.0, 0.0, 1.0])\n\n# minimum acknowledged throttle\nmin_throttle = 0.1\n","sub_path":"src/controllers/iris_controller_parameters.py","file_name":"iris_controller_parameters.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"205671728","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nimport msrest.serialization\n\n\nclass ManagedPrivateEndpoint(msrest.serialization.Model):\n \"\"\"Managed private endpoint.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Fully qualified resource Id for the resource. Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. 
Ex- Microsoft.Compute/virtualMachines or\n Microsoft.Storage/storageAccounts.\n :vartype type: str\n :param properties: Managed private endpoint properties.\n :type properties:\n ~azure.synapse.managedprivateendpoints.v2021_06_01_preview.models.ManagedPrivateEndpointProperties\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'name': {'readonly': True},\n 'type': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'properties': {'key': 'properties', 'type': 'ManagedPrivateEndpointProperties'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(ManagedPrivateEndpoint, self).__init__(**kwargs)\n self.id = None\n self.name = None\n self.type = None\n self.properties = kwargs.get('properties', None)\n\n\nclass ManagedPrivateEndpointConnectionState(msrest.serialization.Model):\n \"\"\"The connection state of a managed private endpoint.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar status: The approval status.\n :vartype status: str\n :param description: The managed private endpoint description.\n :type description: str\n :param actions_required: The actions required on the managed private endpoint.\n :type actions_required: str\n \"\"\"\n\n _validation = {\n 'status': {'readonly': True},\n }\n\n _attribute_map = {\n 'status': {'key': 'status', 'type': 'str'},\n 'description': {'key': 'description', 'type': 'str'},\n 'actions_required': {'key': 'actionsRequired', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(ManagedPrivateEndpointConnectionState, self).__init__(**kwargs)\n self.status = None\n self.description = kwargs.get('description', None)\n self.actions_required = kwargs.get('actions_required', None)\n\n\nclass ManagedPrivateEndpointListResponse(msrest.serialization.Model):\n \"\"\"A list of managed private endpoints.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :param value: List of managed private endpoints.\n :type value:\n list[~azure.synapse.managedprivateendpoints.v2021_06_01_preview.models.ManagedPrivateEndpoint]\n :ivar next_link: The link to the next page of results, if any remaining results exist.\n :vartype next_link: str\n \"\"\"\n\n _validation = {\n 'next_link': {'readonly': True},\n }\n\n _attribute_map = {\n 'value': {'key': 'value', 'type': '[ManagedPrivateEndpoint]'},\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(ManagedPrivateEndpointListResponse, self).__init__(**kwargs)\n self.value = kwargs.get('value', None)\n self.next_link = None\n\n\nclass ManagedPrivateEndpointProperties(msrest.serialization.Model):\n \"\"\"Properties of a managed private endpoint.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :param name: The name of managed private endpoint.\n :type name: str\n :param private_link_resource_id: The ARM resource ID of the resource to which the managed\n private endpoint is created.\n :type private_link_resource_id: str\n :param group_id: The groupId to which the managed private endpoint is created.\n :type group_id: str\n :ivar provisioning_state: The managed private endpoint provisioning state.\n :vartype provisioning_state: str\n :param connection_state: The managed private endpoint connection state.\n :type connection_state:\n 
~azure.synapse.managedprivateendpoints.v2021_06_01_preview.models.ManagedPrivateEndpointConnectionState\n :ivar is_reserved: Denotes whether the managed private endpoint is reserved.\n :vartype is_reserved: bool\n :param fqdns: List of fully qualified domain names.\n :type fqdns: list[str]\n :param is_compliant: Denotes whether the managed private endpoint is compliant.\n :type is_compliant: bool\n \"\"\"\n\n _validation = {\n 'provisioning_state': {'readonly': True},\n 'is_reserved': {'readonly': True},\n }\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'private_link_resource_id': {'key': 'privateLinkResourceId', 'type': 'str'},\n 'group_id': {'key': 'groupId', 'type': 'str'},\n 'provisioning_state': {'key': 'provisioningState', 'type': 'str'},\n 'connection_state': {'key': 'connectionState', 'type': 'ManagedPrivateEndpointConnectionState'},\n 'is_reserved': {'key': 'isReserved', 'type': 'bool'},\n 'fqdns': {'key': 'fqdns', 'type': '[str]'},\n 'is_compliant': {'key': 'isCompliant', 'type': 'bool'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(ManagedPrivateEndpointProperties, self).__init__(**kwargs)\n self.name = kwargs.get('name', None)\n self.private_link_resource_id = kwargs.get('private_link_resource_id', None)\n self.group_id = kwargs.get('group_id', None)\n self.provisioning_state = None\n self.connection_state = kwargs.get('connection_state', None)\n self.is_reserved = None\n self.fqdns = kwargs.get('fqdns', None)\n self.is_compliant = kwargs.get('is_compliant', None)\n","sub_path":"sdk/synapse/azure-synapse-managedprivateendpoints/azure/synapse/managedprivateendpoints/v2021_06_01_preview/models/_models.py","file_name":"_models.py","file_ext":"py","file_size_in_byte":6442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"630417309","text":"# -*- coding: utf-8 -*-\n#################################################################################\n# Author : Acespritech Solutions Pvt. Ltd. ()\n# Copyright(c): 2012-Present Acespritech Solutions Pvt. 
Ltd.\n# All Rights Reserved.\n#\n# This program is copyright property of the author mentioned above.\n# You can`t redistribute it and/or modify it.\n#\n#################################################################################\nimport pytz\nfrom odoo import models, fields, api\nfrom pytz import timezone\nfrom datetime import datetime, date\n\n\nclass StockLocation(models.Model):\n _inherit = 'stock.location'\n\n def get_current_date_x(self):\n if self.env.user.tz:\n tz = timezone(self.env.user.tz)\n else:\n tz = pytz.utc\n if tz:\n c_time = datetime.now(tz)\n return c_time.strftime('%d/%m/%Y')\n else:\n return date.today().strftime('%d/%m/%Y')\n\n def get_current_time_x(self):\n if self.env.user.tz:\n tz = timezone(self.env.user.tz)\n else:\n tz = pytz.utc\n if tz:\n c_time = datetime.now(tz)\n return c_time.strftime('%I:%M %p')\n else:\n return datetime.now().strftime('%I:%M:%S %p')\n\n def get_inventory_details(self):\n product_product = self.env['product.product']\n pos_session = self.env['pos.session'].search([])\n inventory_records = []\n final_list = []\n product_details = []\n for line in pos_session.mapped('picking_ids').filtered(\n lambda picking: picking.location_id.id == self.id).mapped('move_line_ids'):\n product_details.append({\n 'id': line.product_id.id,\n 'qty': line.qty_done,\n })\n custom_list = []\n for each_prod in product_details:\n if each_prod.get('id') not in [list_custom.get('id') for list_custom in custom_list]:\n custom_list.append(each_prod)\n else:\n for each in custom_list:\n if each.get('id') == each_prod.get('id'):\n each.update({'qty': each.get('qty') + each_prod.get('qty')})\n if custom_list:\n for each in custom_list:\n product_id = product_product.browse(each.get('id'))\n inventory_records.append({\n 'product_id': [product_id.id, product_id.name],\n 'category_id': [product_id.id, product_id.categ_id.name],\n 'used_qty': each.get('qty'),\n 'quantity': product_id.with_context({'location': self.id, 'compute_child': False}).qty_available,\n 'uom_name': product_id.uom_id.name or ''\n })\n if inventory_records:\n temp_list = []\n temp_obj = []\n for each in inventory_records:\n if each.get('product_id')[0] not in temp_list:\n temp_list.append(each.get('product_id')[0])\n temp_obj.append(each)\n else:\n for rec in temp_obj:\n if rec.get('product_id')[0] == each.get('product_id')[0]:\n qty = rec.get('quantity') + each.get('quantity');\n rec.update({'quantity': qty})\n final_list = sorted(temp_obj, key=lambda qty: qty['quantity'])\n return final_list or []\n\n def get_warehouse_expiry_detail(self, company_id):\n quant_sql = '''\n SELECT \n sq.location_id as location_id, \n sum(sq.quantity) as expire_count, \n sw.name as warehouse_name\n FROM \n stock_warehouse sw\n LEFT JOIN stock_location sl on sl.id = sw.lot_stock_id\n LEFT JOIN stock_quant sq on sq.location_id = sl.id\n WHERE \n sq.state_check = 'Near Expired'\n AND sw.company_id = %s\n GROUP BY \n sq.location_id,sw.name;\n ''' % (company_id)\n self._cr.execute(quant_sql)\n warehouse_near_expire = self._cr.dictfetchall()\n return warehouse_near_expire\n\n def get_location_detail(self, company_id):\n quant_sql = '''\n SELECT \n sq.location_id as location_id, \n sum(sq.quantity) as expire_count , \n sl.complete_name as location_name\n FROM \n stock_quant sq\n LEFT JOIN stock_location sl on sl.id = sq.location_id\n WHERE \n sl.usage = 'internal' AND \n sl.company_id = %s AND \n sl.active = True AND \n sq.state_check = 'Near Expired'\n GROUP BY \n sq.location_id,sl.complete_name\n ''' % (company_id)\n 
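# run the raw aggregate query and return one dict per internal location\n        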
self._cr.execute(quant_sql)\n location_near_expire = self._cr.dictfetchall()\n return location_near_expire\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"flexipharmacy_ee/models/stock_location.py","file_name":"stock_location.py","file_ext":"py","file_size_in_byte":5126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"34899115","text":"# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"\nWhen you give the AWS KMS keyring specific key IDs it will use those CMKs and nothing else.\nThis is true both on encrypt and on decrypt.\nHowever, sometimes you need more flexibility on decrypt,\nespecially when you don't know which CMKs were used to encrypt a message.\nTo address this need, you can use an AWS KMS discovery keyring.\nThe AWS KMS discovery keyring does nothing on encrypt.\nOn decrypt it reviews each encrypted data key (EDK).\nIf an EDK was encrypted under an AWS KMS CMK,\nthe AWS KMS discovery keyring attempts to decrypt it.\nWhether decryption succeeds depends on permissions on the CMK.\nThis continues until the AWS KMS discovery keyring either runs out of EDKs\nor succeeds in decrypting an EDK.\n\nHowever, sometimes you need to be a *bit* more restrictive than that.\nTo address this need, you can use a client supplier to restrict what regions an AWS KMS keyring can talk to.\n\nA more complex but more common use-case is that you would *prefer* to stay within a region,\nbut you would rather make calls to other regions than fail to decrypt the message.\nIn this case, you want a keyring that will try to decrypt data keys in this region first,\nthen try other regions.\n\nThis example shows how to configure and use a multi-keyring with the AWS KMS keyring\nto prefer the current AWS region while also failing over to other AWS regions.\n\nhttps://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/choose-keyring.html#use-kms-keyring\n\nFor an example of how to use the AWS KMS keyring with CMKs in multiple regions,\nsee the ``keyring/aws_kms/multiple_regions`` example.\n\nFor examples of how to use the AWS KMS keyring with custom client configurations,\nsee the ``keyring/aws_kms/custom_client_supplier``\nand ``keyring/aws_kms/custom_kms_client_config`` examples.\n\nFor examples of how to use the AWS KMS discovery keyring on decrypt,\nsee the ``keyring/aws_kms/discovery_decrypt``\nand ``keyring/aws_kms/discovery_decrypt_in_region_only`` examples.\n\"\"\"\nfrom boto3.session import Session\n\nimport aws_encryption_sdk\nfrom aws_encryption_sdk.keyrings.aws_kms import AwsKmsKeyring\nfrom aws_encryption_sdk.keyrings.aws_kms.client_suppliers import AllowRegionsClientSupplier, DenyRegionsClientSupplier\nfrom aws_encryption_sdk.keyrings.multi import MultiKeyring\n\n\ndef run(aws_kms_cmk, source_plaintext):\n # type: (str, bytes) -> None\n \"\"\"Demonstrate configuring an AWS KMS discovery-like keyring a particular AWS region and failover to others.\n\n :param str aws_kms_cmk: The ARN of an AWS KMS CMK that protects data keys\n :param bytes source_plaintext: Plaintext to encrypt\n \"\"\"\n # Prepare your encryption context.\n # Remember that your encryption context is NOT SECRET.\n # https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/concepts.html#encryption-context\n encryption_context = {\n \"encryption\": \"context\",\n \"is not\": \"secret\",\n \"but adds\": \"useful metadata\",\n \"that can help you\": \"be confident that\",\n 
\"the data you are handling\": \"is what you think it is\",\n }\n\n # Create the keyring that determines how your data keys are protected.\n encrypt_keyring = AwsKmsKeyring(generator_key_id=aws_kms_cmk)\n\n # To create our decrypt keyring, we need to know our current default AWS region.\n #\n # Create a throw-away boto3 session to discover the default region.\n local_region = Session().region_name\n\n # Now, use that region name to create two AWS KMS discovery keyrings:\n #\n # One that only works in the local region\n local_region_decrypt_keyring = AwsKmsKeyring(\n is_discovery=True, client_supplier=AllowRegionsClientSupplier(allowed_regions=[local_region])\n )\n # and one that will work in any other region but NOT the local region.\n other_regions_decrypt_keyring = AwsKmsKeyring(\n is_discovery=True, client_supplier=DenyRegionsClientSupplier(denied_regions=[local_region])\n )\n\n # Finally, combine those two keyrings into a multi-keyring.\n #\n # The multi-keyring steps through its member keyrings in the order that you provide them,\n # attempting to decrypt every encrypted data key with each keyring before moving on to the next keyring.\n # Because of this, other_regions_decrypt_keyring will not be called\n # unless local_region_decrypt_keyring fails to decrypt every encrypted data key.\n decrypt_keyring = MultiKeyring(children=[local_region_decrypt_keyring, other_regions_decrypt_keyring])\n\n # Encrypt your plaintext data.\n ciphertext, _encrypt_header = aws_encryption_sdk.encrypt(\n source=source_plaintext, encryption_context=encryption_context, keyring=encrypt_keyring\n )\n\n # Demonstrate that the ciphertext and plaintext are different.\n assert ciphertext != source_plaintext\n\n # Decrypt your encrypted data using the multi-keyring.\n #\n # You do not need to specify the encryption context on decrypt\n # because the header of the encrypted message includes the encryption context.\n decrypted, decrypt_header = aws_encryption_sdk.decrypt(source=ciphertext, keyring=decrypt_keyring)\n\n # Demonstrate that the decrypted plaintext is identical to the original plaintext.\n assert decrypted == source_plaintext\n\n # Verify that the encryption context used in the decrypt operation includes\n # the encryption context that you specified when encrypting.\n # The AWS Encryption SDK can add pairs, so don't require an exact match.\n #\n # In production, always use a meaningful encryption context.\n assert set(encryption_context.items()) <= set(decrypt_header.encryption_context.items())\n","sub_path":"examples/src/keyring/aws_kms/discovery_decrypt_with_preferred_regions.py","file_name":"discovery_decrypt_with_preferred_regions.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"423552699","text":"import htmlgenerator as hg\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .icon import Icon\n\n\nclass ProgressStep(hg.LI):\n STATUS = {\n \"warning\": \"warning\",\n \"current\": \"circle--filled\",\n \"complete\": \"checkmark--outline\",\n \"incomplete\": \"radio-button\",\n }\n\n def __init__(\n self, label, status, optional=False, tooltip=None, disabled=False, **kwargs\n ):\n assert (\n status in ProgressStep.STATUS\n ), f\"{status} must be one of {ProgressStep.STATUS}\"\n kwargs[\"class\"] = (\n kwargs.get(\"class\", \"\") + f\" bx--progress-step bx--progress-step--{status}\"\n )\n if disabled:\n kwargs[\"aria_disabled\"] = \"true\"\n kwargs[\"class\"] += \" 
bx--progress-step--disabled\"\n elements = [\n Icon(ProgressStep.STATUS[status], size=16),\n hg.P(label, tabindex=0, _class=\"bx--progress-label\"),\n hg.SPAN(_class=\"bx--progress-line\"),\n ]\n\n if optional:\n elements.insert(2, hg.P(_(\"Optional\"), _class=\"bx--progress-optional\"))\n\n if tooltip is not None:\n tooltipid = hg.html_id(tooltip, \"tooltip-label\")\n elements[1][\"aria-describedby\"] = tooltipid\n elements.insert(\n 2,\n hg.DIV(\n hg.SPAN(_class=\"bx--tooltip__caret\"),\n hg.P(tooltip, _class=\"bx--tooltip__text\"),\n id=tooltipid,\n role=\"tooltip\",\n data_floating_menu_direction=\"bottom\",\n _class=\"bx--tooltip\",\n data_avoid_focus_on_open=True,\n ),\n )\n\n super().__init__(*elements, **kwargs)\n\n\nclass ProgressIndicator(hg.UL):\n def __init__(self, steps, vertical=False, **kwargs):\n \"\"\"steps: lazy object or iterator of tuples in the form (step_name, step_status)\"\"\"\n kwargs[\"data_progress\"] = True\n kwargs[\"data_progress_current\"] = True\n kwargs[\"class\"] = (\n kwargs.get(\"class\", \"\")\n + \" bx--progress\"\n + (\" bx--progress--vertical\" if vertical else \"\")\n )\n self.steps = steps\n super().__init__(**kwargs)\n\n def render(self, context):\n steps = hg.resolve_lazy(self.steps, self, context)\n self.extend((ProgressStep(label, status) for label, status in steps))\n return super().render(context)\n\n\n\"\"\"\n
Reference markup: a Carbon Design System progress-indicator list with five steps,
"First step" (optional, with tooltip), "Overflow Ex.1" (with tooltip),
"Overflow Ex. 2 Multi Line" (with tooltip), "Fourth step", and "Fifth step".
Only the step labels survive here; the HTML tags of the original sample are not recoverable.
\n\"\"\"\n","sub_path":"bread/layout/components/progress_indicator.py","file_name":"progress_indicator.py","file_ext":"py","file_size_in_byte":6308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"326811926","text":"from textblob import TextBlob\nimport nltk\nfrom nltk.corpus import stopwords\nfrom pathlib import Path\nimport pandas as pd\nfrom pathlib import Path\n\nblob = TextBlob(Path(\"RomeoAndJuliet.txt\").read_text())\nprint(blob.word_counts[\"juliet\"])\nprint(blob.word_counts[\"romeo\"])\nprint(blob.word_counts[\"thou\"])\nprint(blob.word_counts[\"joy\"])\nprint(blob.noun_phrases.count(\"lady capulet\"))\n\nstops = stopwords.words(\"english\")\nmore_stops = [\"thee\", \"thy\", \"thou\"]\nstops += more_stops\n\nitems = blob.word_counts.items()\n# print(items) # prints dict_items with list of tuples, words and count of them as values\nitems = [item for item in items if item[0] not in stops]\nprint(items[:10])\n\nfrom operator import itemgetter\n\nsorted_items = sorted(items)\nsorted_items = sorted(items, key=itemgetter(1), reverse=True)\nprint(sorted_items[:10]) # prints words, top 10\ntop20 = sorted_items[:20]\nprint(top20)\n\ndf = pd.DataFrame(top20, columns=[\"word\", \"count\"])\n\nimport matplotlib.pyplot as plt\n\ndf.plot.bar(\n x=\"word\", y=\"count\", rot=0, legend=False, color=[\"y\", \"c\", \"m\", \"b\", \"g\", \"r\"]\n)\nplt.gcf().tight_layout()\n# plt.show()\n\nfrom pathlib import Path\nfrom wordcloud import WordCloud\nimport imageio\n\ntext = Path(\"RomeoAndJuliet.txt\").read_text()\n\n# print(text)\n\nmask_image = imageio.imread(\"mask_heart.png\")\nwordcloud = WordCloud(colormap=\"prism\", mask=mask_image, background_color=\"white\")\nwordcloud = wordcloud.generate(text)\nwordcloud = wordcloud.to_file(\"RomeoAndJulietHeart.png\")\nplt.imshow(wordcloud)\nprint(\"done\")\n","sub_path":"NLP/nlp_2.py","file_name":"nlp_2.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"269471545","text":"\n\nfrom xai.brain.wordbase.verbs._reason import _REASON\n\n#calss header\nclass _REASONS(_REASON, ):\n\tdef __init__(self,): \n\t\t_REASON.__init__(self)\n\t\tself.name = \"REASONS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"reason\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_reasons.py","file_name":"_reasons.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"114464645","text":"import pandas as pd\nimport numpy as np\n\ndef dataFrameToSavableRecords(df, dataSet):\n saveRecords = []\n subset = df\n if dataSet:\n subset = df[dataSet]\n for subData in [tuple(x) for x in subset.values]:\n saveRecords.append(subData)\n return saveRecords\n\n\ndef nanToNone(df_data, columnList):\n for column in columnList:\n df_data.loc[:, (column)] = np.where(df_data[column].isna(), None, df_data[column])\n return df_data","sub_path":"tools/PandasDBUtils.py","file_name":"PandasDBUtils.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"619838683","text":"import datetime\nimport time\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport boto3\nimport tempfile\nimport mimetypes\nimport uuid\nfrom django.forms.models import model_to_dict\nfrom 
batch_controller.models import CommentTb, BookmarkTb\nfrom vision_controller import views as vision_views\nfrom vision_controller.models import VisionTb\n\nbucket_name = 'papao-s3-bucket'\ns3 = boto3.resource('s3')\nbucket = s3.Bucket(bucket_name)\nhostname = \"http://220.230.121.76:8000\"\n\n\n# hostname = \"localhost:8000\"\n\ndef get_image(request, filename):\n f = tempfile.TemporaryFile()\n bucket.download_fileobj(filename, f)\n f.seek(0)\n return HttpResponse(f.read(), content_type=mimetypes.guess_type(filename))\n\n\n@csrf_exempt\ndef post_image_from_street(request):\n try:\n files = request.FILES.getlist('file')\n post_type = request.POST['post_type']\n response = vision_views.get_vision_result_by_file(files[0])\n up_kind_code, kind_code = vision_views.get_kind_type_codes(response.label_results.label)\n filenames = list(map(lambda x: upload_image(x), files))\n vision_views.insert_vision_result(color_results=response.color_results, label_results=response.label_results,\n post_type=post_type.upper(), url=hostname + \"/v1/download/\" + filenames[0])\n return JsonResponse(\n {'status': 'OK', 'image_url': list(map(lambda x: hostname + \"/v1/download/\" + x, filenames)),\n 'kind_code': kind_code, 'up_kind_code': up_kind_code})\n except Exception as e:\n return JsonResponse({'status': 'Failure', \"message\": str(e)})\n\n\n@csrf_exempt\ndef post_image(request):\n try:\n files = request.FILES.getlist('file')\n filenames = list(map(lambda x: upload_image(x), files))\n return JsonResponse(\n {'status': 'OK', 'image_url': list(map(lambda x: hostname + \"/v1/download/\" + x, filenames))})\n except Exception as e:\n return JsonResponse({'status': 'Failure', \"message\": str(e)})\n\n\ndef delete_image(request, filename):\n response = bucket.delete_objects(\n Delete={\n 'Objects': [\n {\n 'Key': filename\n },\n ]\n }\n )\n VisionTb.objects.filter(image_url__endswith=filename).delete()\n return JsonResponse(response)\n\n\n@csrf_exempt\ndef search_image(request, post_id):\n try:\n now = datetime.datetime.now()\n ts = time.time()\n result_post, result_url = vision_views.get_search_result_with_time(post_id=post_id,\n start_date=now - datetime.timedelta(weeks=4),\n end_date=now)\n temp_list = list()\n for i, item in enumerate(result_post):\n temp = encode_post_to_result(item)\n temp['imageUrls'] = [\n {\n \"key\": 0,\n \"url\": result_url[i]\n }\n ]\n temp['bookmarkCount'] = BookmarkTb.objects.filter(post_id__exact=item.id).count()\n temp['commentCount'] = CommentTb.objects.filter(post_id__exact=item.id).count()\n temp_list.append(temp)\n return JsonResponse({'currentPage': 0,\n \"totalElements\": 0,\n \"totalPages\": 0,\n \"status\":\"OK\",\n \"elements\": temp_list})\n except Exception as err:\n print(\"Search Error : %s\" % str(err))\n return JsonResponse({'currentPage': 0,\n \"totalElements\": 0,\n \"totalPages\": 0,\n \"elements\": [],\n 'status':'fail'})\n\n\n\n@csrf_exempt\ndef test_search_image(request, post_id):\n now = datetime.datetime.now()\n result_post, result_url = vision_views.get_search_result_with_time(post_id=post_id,\n start_date=now - datetime.timedelta(weeks=4),\n end_date=now)\n return HttpResponse(\"\\n\".join(list(map(lambda x:\"\"%x,result_url))))\n\n\ndef index(request):\n return HttpResponse(\"Hello, world!\")\n\n\ndef upload_image(file):\n filename = \".\".join([uuid.uuid4().hex, file.name.split(\".\")[-1]])\n bucket.upload_fileobj(file, filename)\n return filename\n\ndef encode_post_to_result(item):\n return {\"id\":item.id,\"createdDate\":item.created_date,'updatedDate': 
item.updated_date,\n            \"genderType\":item.gender_type, \"happenDate\":item.happen_date, \"happenPlace\":item.happen_place,\n            \"kindName\":item.kind_name, \"postType\":item.post_type,\"hitCount\":item.hit_count,\n            \"stateType\":item.state_type}\n","sub_path":"papao/papao-image-master/aws_controller/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"590364352","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/6/27 20:20\n# @Author : Weiyang\n# @File : BERT_Clustering_distance.py\n\n########################################################################################################################\n# Keyword extraction by clustering BERT word vectors\n# Basic idea of the algorithm:\n# First, segment the document into words to obtain candidate keywords, from which the cluster-center words are chosen;\n# then cluster the words of the document and take the cluster centers;\n# POS-tag the document and collect candidate phrases following a fixed pattern (for example, a word string of zero or\n# more adjectives followed by one or more nouns forms a noun phrase);\n# the pattern used here: for every word whose POS tag is listed in `patterns`, the word plus one word on each side of it\n# forms a candidate phrase;\n# finally, the phrases that contain one or more cluster-center words are kept as the final keywords.\n\n# The similarity measure used by the clustering is the Euclidean distance\n########################################################################################################################\n\nfrom bert_serving.client import BertClient\nimport jieba as jb\nfrom sklearn.cluster import KMeans\nimport jieba.posseg as pseg\nfrom collections import defaultdict\nimport re\nimport numpy as np\n\nclass BERT_Clustering:\n    '''Keyword extraction by clustering BERT word vectors'''\n\n    def __init__(self,n_cluster=10, patterns=['nr','ns','nt','nz','a','an','m','v'],stopwordspath='../stopwords/百度停用词表.txt'):\n        self.stopwords = self.Readstopwords(stopwordspath) # stopwords\n        self.n_cluster = n_cluster # number of clusters\n        self.patterns = patterns # [pattern1,pattern2,...] POS patterns for extracting candidate phrases, e.g. [noun, adjective, ...]\n        # patterns holds the POS tags to extract; the output takes each matching word as the center plus one word on each side\n\n    def Readstopwords(self, stopwordspath):\n        '''read stopwords'''\n        with open(stopwordspath, 'r', encoding='utf-8-sig') as fi:\n            stopwords = []\n            for line in fi:\n                line = line.strip()\n                stopwords.append(line)\n        return stopwords\n\n    def Readcontent(self,filecontent):\n        '''Split the text into sentences'''\n        line_lst = re.split(\"[。?!?!,,.;;]\",filecontent)\n        all_words = [] # candidate words for clustering, from which the cluster-center words are chosen\n        for line in line_lst:\n            # word segmentation\n            words = jb.cut(line)\n            # POS tagging\n            #word_flag = pseg.cut(line)\n            # drop non-Chinese characters\n            words = [word for word in words if '\\u4e00' <= word <= '\\u9fff']\n            all_words.extend(words)\n        # drop duplicate words\n        all_words = list(set(all_words))\n        return line_lst,all_words # sentence list, word list\n\n    def getPointsDistance(self,points1,points2):\n        '''Pairwise distances between the point sets points1 and points2, using (A-B)**2 = A**2 + B**2 - 2AB'''\n        A,B = np.array(points1[:]),np.array(points2[:])\n        BT = np.transpose(B)\n        A_BT = np.dot(A,BT)\n        Asq = A**2\n        Asq = np.tile(np.sum(Asq,axis=1,keepdims=True),(1,A_BT.shape[1]))\n        Bsq = BT ** 2\n        Bsq = np.tile(np.sum(Bsq, axis=0, keepdims=True), (A_BT.shape[0],1))\n        ED = np.sqrt(Asq+Bsq-2*A_BT) # Euclidean distance\n        return ED\n\n    def calculate_similarity(self,all_words):\n        '''\n        Cluster the words of the document and pick the cluster-center words\n        all_words:[word1,word2,...]\n        '''\n        bc = BertClient()\n        # word vector of every word\n        points = bc.encode(all_words).tolist()\n        # run the clustering\n        k = KMeans(n_clusters=self.n_cluster)\n        k.fit_predict(points)\n        # cluster label of every word\n        labels = k.labels_\n        # distance of every word to each cluster center\n        distance = k.fit_transform(points).tolist()\n        # distance of every cluster center to each word\n        distance = np.array(distance).transpose()\n        # index of the word closest to each cluster center\n        indexs = [np.argmin(distance[center]) for center in range(self.n_cluster)]\n        # the word closest to each cluster center\n        center_words = [all_words[index] for index in indexs]\n        # the words belonging to each cluster\n        cluster_words = defaultdict(list)\n        for index,label in enumerate(labels):\n            cluster_words[label].append(all_words[index])\n        return 
center_words, labels, cluster_words # cluster-center words, label per word, words per cluster\n\n    def getPosseg(self,lines):\n        '''\n        Extract phrases matching the POS patterns as candidate phrases; lines is the list of sentences\n        '''\n        # qualifying candidate phrases\n        phrases = []\n        # iterate over the sentences\n        for line in lines:\n            # POS-tag the sentence\n            result = pseg.cut(line)\n            # word sequence\n            words = []\n            # POS sequence\n            psgs = []\n            # collect the POS-tagging result\n            for w in result:\n                words.append(w.word)\n                psgs.append(w.flag)\n            # walk the word sequence and the POS sequence together\n            previous = '' # the word preceding the current word\n            count = 0\n            for word,flag in zip(words,psgs):\n                # if the current word's POS tag is in patterns, join it with one word on each side into a substring\n                if flag in self.patterns:\n                    # if the current word is not the last word\n                    if count < len(words)-1:\n                        subString = previous+word+words[count+1]\n                    else:\n                        subString = previous+word\n                    # keep the qualifying substring as a candidate phrase\n                    phrases.append(subString)\n                # advance the previous word\n                previous = word\n                count += 1\n        # drop duplicates\n        phrases = list(set(phrases))\n        return phrases # candidate phrases\n\n    def extract_Keywords(self,filecontent):\n        '''Extract keywords; filecontent is the text of the document'''\n        # sentence list and deduplicated word list of the document\n        lines,words = self.Readcontent(filecontent)\n        # stopwords were already loaded in __init__\n        stopwords = self.stopwords\n        # remove stopwords\n        words = [word for word in words if word not in stopwords]\n        # get the cluster-center words\n        center_words, labels, clusters = self.calculate_similarity(words)\n        # get the candidate phrase list\n        phrases = self.getPosseg(lines)\n        keywords = [] # collected keywords\n        # iterate over the candidate phrases\n        for phrase in phrases:\n            # check whether the phrase contains a cluster-center word; if so, keep it\n            for center in center_words:\n                if center in phrase:\n                    keywords.append(phrase)\n                    break\n        return keywords\n\nif __name__ == '__main__':\n    filecontent = '韩媒称,国际油价暴跌让中国笑逐颜开。' \\\n                  '分析称,中国在国际油价暴跌后“三管齐下”,大幅缩减石油进口费用、扩大战略储备油,' \\\n                  '同时向资金短缺的产油国提供贷款以壮大亲中势力。美银美林指出,国际油价每下跌10%,中国GDP会增长0.15%。'\n    bt = BERT_Clustering(n_cluster=10)\n    keywords = bt.extract_Keywords(filecontent)\n    print(keywords)","sub_path":"src/BERT_Clustering_distance.py","file_name":"BERT_Clustering_distance.py","file_ext":"py","file_size_in_byte":7474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"360603284","text":"class Person :\n    # A class can define special methods (magic methods)\n    # Special method names start and end with __\n    # Special methods are not called by us directly; do not try to call them yourself\n    # Special methods are invoked automatically at special moments\n\n    def __init__(self,name):\n        # initialize attributes on the newly created object through self\n        self.name = name\n\n    def say_hello(self):\n        print('大家好,我是%s'%self.name)\n\np1 = Person('孙悟空')\np2 = Person('猪八戒')\np3 = Person('沙和尚')\np4 = Person('唐僧')\n\np3.say_hello()\np4.say_hello()","sub_path":"lesson_06_对象/code/03.对象的初始化.py","file_name":"03.对象的初始化.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"283058273","text":"import re\nimport requests\n\n\ndef parse_locales(file_data):\n    locales = []\n    for locale_name, locale_var in re.findall(\n        r'{title:\"([^\"]+)\",locale:([^}]+)}', file_data.decode(\"utf-8\")\n    ):\n        locale_id = f\"{locale_var[2:4]}-{locale_var[4:]}\"\n        locales.append((locale_id, locale_name))\n    return locales\n\n\ndef main():\n    url = \"https://assets-www.xbox.com/xbox-web/static/js/LocalePickerPage.7c45fcf5.chunk.js\"\n    resp = requests.get(url, timeout=60)\n    resp.raise_for_status()\n    for locale_id, locale_name in parse_locales(resp.content):\n        print(f'ret.Add(new XboxLocale(\"{locale_id}\", \"{locale_name}\"));')\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"ParseLocale.py","file_name":"ParseLocale.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"107602458","text":"for t in range(1, 11):\n    T = int(input())\n    numlst = list(map(int, input().split()))\n    idx = 0\n\n    while numlst[-1] != 0:\n        subtract = idx + 1\n        numlst[0] = 
numlst[0] - subtract\n        if numlst[0] < 0: numlst[0] = 0\n        temp = [0] * 8\n        for i in range(8):\n            temp[i] = numlst[(i+1) % 8]\n        numlst = temp\n        idx = (idx + 1) % 5\n    \n    print('#{0}'.format(t), end=' ')\n    print(*numlst)","sub_path":"python/swea/password.py","file_name":"password.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"389992587","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 23 22:28:00 2019\r\n\r\n@author: Michael\r\n\"\"\"\r\n\r\ndef disjoint_1(a, b, c):\r\n    \"\"\" Return True if there is no element common to all three lists a, b and c \"\"\"\r\n\r\n    for i in a:\r\n        for j in b:\r\n            for k in c:\r\n                if i == j == k:\r\n                    return False\r\n    return True","sub_path":"COMP9024/Chapter3/disjoint_1.py","file_name":"disjoint_1.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"312323508","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport os\nimport torchsnooper\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n# Define the model\nclass GRUModel(nn.Module):\n    def __init__(self, args):\n        super(GRUModel, self).__init__()\n        self.embed_dim = args.embed_dim\n        self.hidden_size = 128\n        self.layer_num = 1\n        self.embeddings = nn.Embedding(21128, self.embed_dim)\n        self.gru = nn.GRU(self.embed_dim, \n                          self.hidden_size, \n                          self.layer_num,\n                          batch_first=True)\n        self.fc = nn.Linear(self.hidden_size, 2)\n\n    def forward(self, x):\n        x = self.embeddings(x)\n        h0 = torch.zeros(self.layer_num, x.size(0), self.hidden_size)\n        h0 = h0.to(device)\n        out, hn = self.gru(x, h0)\n        out = self.fc(out[:, -1, :])\n\n        return out\n\n\n#@torchsnooper.snoop() \n# Train the GRU\ndef grutrain(train_iter, dev_iter, model, args):\n    optimizer = optim.Adam(model.parameters(), lr = args.lr)\n\n    best_acc = 0\n    steps = 0\n    last_step = 0\n    model = model.to(device)\n    \n    print('training...')\n    for epoch in range(args.epochs):\n        model.train()\n        for batch in train_iter: \n            feature, target = batch[1], batch[0] #(W,N) (N)\n            feature = feature.to(device)\n            target = target.to(device)\n            # Clear the gradients\n            optimizer.zero_grad()\n            output = model(feature)\n            loss = F.cross_entropy(output, target)\n            loss.backward()\n            optimizer.step()\n\n            steps += 1\n            if steps % 10 == 0:\n                result = torch.max(output,1)[1].view(target.size())\n                corrects = (result.data == target.data).sum()\n                accuracy = corrects*100.0/len(batch[0])\n                print('\\rBatch[{}] - loss: {:.6f} acc: {:.4f}'.format(\n                    steps, loss.data.item(), accuracy))\n            if steps % 200 == 0: # 200 is a multiple of 10, so an elif here would never run\n                save(model,args.save_dir,'snapshot',steps)\n                dev_acc = eval(dev_iter, model, args)\n                if dev_acc > best_acc:\n                    best_acc = dev_acc\n                    last_step = steps\n                    if args.save_best:\n                        save(model,args.save_dir,'best',steps)\n\n\ndef save(model, save_dir, save_prefix, steps):\n    if not os.path.isdir(save_dir):\n        os.makedirs(save_dir)\n    save_prefix = os.path.join(save_dir,save_prefix)\n    save_path = '{}_steps_{}.pt'.format(save_prefix,steps)\n    torch.save(model.state_dict(),save_path)\n\ndef eval(data_iter, model, args):\n    model.eval()\n    corrects, avg_loss = 0,0\n    for batch in data_iter:\n        feature, target = batch[1], batch[0]\n        feature = feature.to(device)\n        target = target.to(device)\n        \n        logit = model(feature)\n        loss = F.cross_entropy(logit,target)\n        \n        avg_loss += loss.data\n        result = torch.max(logit,1)[1]\n        corrects += 
(result.view(target.size()).data == target.data).sum()\n \n size = len(data_iter.dataset)\n avg_loss /= size \n accuracy = 100.0 * corrects/size\n print('\\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \\n'.format(avg_loss,accuracy,corrects,size))\n \n return accuracy\n","sub_path":"distill_test/GRUTrain.py","file_name":"GRUTrain.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"601289747","text":"for tc in range(int(input())):\n n=int(input())\n arr=[list(map(int,input().split())) for i in range(n)]\n res=0\n visited=[0 for i in range(n)]\n\n def func(idx,value):\n global res\n if idx==n:\n res=max(value,res)\n return\n if value<=res:\n return\n for i in range(n):\n if not visited[i]:\n visited[i]=1\n func(idx+1,value*arr[i][idx]/100)\n visited[i]=0\n func(0,1)\n print(f'#{tc+1} {res:.6f}')\n","sub_path":"1865.py","file_name":"1865.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"546947561","text":"l = [3, 4, 2]\n# a\nmul = 1\nfor i in range(len(l)):\n mul *= l[i]\nprint(\"Multiplication of numbers:\", mul)\n\n\n# b\nmul = 1\nfor i in l:\n mul *= i\nprint(\"Multiplication of numbers in list:\", mul)","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"396051249","text":"import csv\nimport argparse\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Fix csv file.')\n parser.add_argument('sourcecsv', nargs=1, help='source csv to fix')\n parser.add_argument('targetcsv', nargs=1, help='target fixed csv to create')\n parser.add_argument('--in-delimiter', nargs='?', help='delim')\n parser.add_argument('--in-quote', nargs='?', help='quote')\n args = parser.parse_args()\n\n if not args.in_delimiter or not args.in_quote:\n with open(args.sourcecsv[0], 'r') as sfile:\n dialect = csv.Sniffer().sniff(sfile.read(1024))\n sfile.seek(0)\n source_reader = csv.reader(sfile, dialect)\n with open(args.targetcsv[0], 'w') as tfile:\n target_writer = csv.writer(tfile, delimiter=',')\n for row in source_reader:\n target_writer.writerow(row)\n else:\n dlim = args.in_delimiter\n quote = args.in_quote\n with open(args.sourcecsv[0], 'r') as sfile:\n source_reader = csv.reader(sfile, delimiter=dlim, quotechar=quote)\n with open(args.targetcsv[0], 'w') as tfile:\n target_writer = csv.writer(tfile, delimiter=',')\n for row in source_reader:\n target_writer.writerow(row)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Exercises/fix_csv/fix_csv.py","file_name":"fix_csv.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"323698075","text":"# Import libraries.\nimport data_loader\nimport scheduler\nimport csv_writer\nimport os\nimport sys\nimport time\nimport datetime\nimport logging\nfrom pymongo import MongoClient\n\n# Set up database connection.\nclient = MongoClient(os.environ['DB_PORT_27017_TCP_ADDR'],27017)\ndb = client.trial\n\n# Global variable to keep track of week number.\nweek_number = 0\n\n#-----------------------------------------------------------------------#\n#\t\t\t\t\tFunction: Bus Speed Crawler Init\t\t\t\t\t#\n#-----------------------------------------------------------------------#\ndef bus_speed_crawler_init():\n\t# Update week number and convert to 
string.\n\tglobal week_number\n\tweek_number = week_number + 1\n\tweek_parameter = str(week_number)\n\n\t# Call data_loader for the week.\n\tlogger.info(\"Calling Data Loader For Week: \"+week_parameter)\n\tdata_loader.load_data(week_parameter)\n\tlogger.info(\"Loaded Data for Week: \"+week_parameter)\n\t\n\t# Call scheduler for the week.\n\tlogger.info(\"Calling Scheduler for Week: \"+week_parameter)\n\tscheduler.schedule_trips(week_parameter)\n\tlogger.info(\"Crawled all trips for Week: \"+week_parameter)\n\n# Non-stop loop to start weekly crawls.\nwhile True:\n\t# Scheduled start time for normal-crawler for every week.\n\tstart_day = \"Sunday\"\n\tstart_hour = \"18\"\n\tstart_min = \"15\"\n\n\t# Calculate current time and check if current time = start time.\n\tnow = datetime.datetime.now()\n\tcurrent_day = now.strftime(\"%A\")\n\tif start_day == current_day and start_hour==str(now.hour) and start_min==str(now.minute):\n\t\t# Create log.\n\t\tlogger = logging.getLogger(\"bus_speed_crawler\")\n\t\tlogger.setLevel(logging.DEBUG)\n\n\t\t# Create the log handler & reset every week.\n\t\tlh = logging.FileHandler(\"bus_speed_crawler_log.txt\", mode=\"w\")\n\n\t\t# Format the log.\n\t\tformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\t\tlh.setFormatter(formatter)\n\n\t\t# Add handler to the logger object.\n\t\tlogger.addHandler(lh)\n\n\t\t# Call normal crawler.\n\t\tlogger.info(\"Starting Bus Speed Crawler\")\n\t\tprint (\"Starting Bus Speed Crawler on \"+str(now))\n\t\tbus_speed_crawler_init()\n\t\tprint (\"Week \" + str(week_number) + \" Done.\")\n\t\tlogger.info(\"Week \" + str(week_number) + \" Done.\")\n\n\t\t# Close log.\n\t\tlogger.removeHandler(lh)\n\n\t# Check every 30 seconds.\n\ttime.sleep(30)\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"555868628","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\nderiveGeneStructure.py\n\nCreated on Wed Jul 26 19:00:24 2017\n\n@author: mfortz\n\"\"\"\n\n\ndef deriveGeneStructure(all_gene_file):\n \n \"\"\"\n copy of structures/main.py\n find out how to load object from another script\n \"\"\"\n \n # might need to switch directory to structures\n\n from DataSet import DataSet\n from GeneFamily import GeneFamily\n \n \n \"\"\"Create all Gene objects\"\"\"\n data = DataSet()\n \n for l in all_gene_file[1:]:\n data.addGene(l.split())\n \n \n \"\"\"Create a list of Species\"\"\"\n def generateSpeciesDict():\n for g in data.genesDict:\n currentSpecies = data.genesDict[g].species\n if currentSpecies not in data.speciesDict:\n data.addSpecies(currentSpecies)\n \n generateSpeciesDict()\n \n \n \"\"\"Make Family Dictionary\"\"\"\n def generateFamilyDict():\n for g in data.genesDict:\n currentGene = data.genesDict[g]\n \n if currentGene.family not in data.familiesDict:\n currentFamily = GeneFamily(currentGene.family,data.speciesDict) \n data.addFamily(currentFamily.familyName,currentFamily)\n else:\n currentFamily = data.familiesDict[currentGene.family]\n currentFamily.addToFamily(currentGene)\n \n \n generateFamilyDict()\n \n \n \n \"\"\"Make a dictionary which gives information about gene order \"\"\"\n\n geneOrder = {}\n for s in data.speciesDict:\n geneOrder[s] = {}\n \n for g in data.genesDict:\n currentGene = data.genesDict[g]\n try:\n geneOrder[currentGene.species][currentGene.ctg].append(currentGene) #pass gene object\n except KeyError:\n 
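# A hypothetical alternative (not used here): making geneOrder[s] a\n # collections.defaultdict(list) would remove this try/except, e.g.\n # geneOrder[sp][ctg].append(currentGene) with no KeyError possible.\n 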
geneOrder[currentGene.species][currentGene.ctg] = [currentGene]\n \n \n #sort genes in their contig\n for s in geneOrder:\n for c in geneOrder[s]:\n geneOrder[s][c].sort(key = lambda gene: int(gene.start))\n \n \n return data, geneOrder\n \n ","sub_path":"Joined_families_pipeline/deriveGeneStructure.py","file_name":"deriveGeneStructure.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"509881996","text":"from cn.edustar.jitar.util import ParamUtil\r\nfrom group_member_query import GroupMemberQuery\r\nfrom cn.edustar.jitar.pojos import Group\r\nfrom base_preparecourse_page import PrepareCoursePlanQuery, PrepareCourseQuery\r\n\r\nclass group_preparecourseplan_list(ActionExecutor, ShowGroupBase, RequestMixiner, ResponseMixiner, PageCheckMixiner): \r\n def __init__(self):\r\n self.params = ParamUtil(request)\r\n self.group_svc = __jitar__.groupService\r\n self.preparecourse_svc = __jitar__.prepareCourseService\r\n \r\n def execute(self):\r\n response.setContentType(\"text/html; charset=UTF-8\")\r\n groupName = request.getAttribute(\"groupName\")\r\n if groupName == None or groupName == \"\":\r\n return self.notFound() \r\n \r\n group = group_svc.getGroupByName(groupName)\r\n if group == None:\r\n return self.notFound()\r\n \r\n pager = self.params.createPager()\r\n qry = PrepareCoursePlanQuery(\"\"\" pcp.title, pcp.prepareCoursePlanId, pcp.startDate,pcp.endDate,pcp.planContent,pcp.createDate, pcp.defaultPlan \"\"\")\r\n qry.groupId = group.groupId\r\n pager.setPageSize(16) \r\n pager.itemName = u\"备课计划\"\r\n pager.itemUnit = u\"个\"\r\n plan_list = qry.query_map(pager)\r\n pager.totalRows = plan_list.size() \r\n \r\n request.setAttribute(\"plan_list\", plan_list)\r\n request.setAttribute(\"pager\",pager)\r\n \r\n page = self.getGroupIndexPage(group)\r\n page = {\r\n \"pageId\":0,\r\n \"layoutId\":2, # 固定是布局2\r\n \"isSystemPage\" : \"true\", \r\n \"owner\" : \"user\", \r\n \"title\" :\"\",\r\n \"skin\":page.skin\r\n }\r\n \r\n # 构造widgets .\r\n widgets = [\r\n {\"id\": \"1\", \"pageId\":0, \"columnIndex\":1,\"title\":u\"协作组信息\",\"module\":\"group_info\", \"ico\":\"\", \"data\":\"\"}\r\n ] \r\n self.getGroupInfo(group.groupName)\r\n request.setAttribute(\"widget_list\", widgets)\r\n request.setAttribute(\"widgets\", widgets)\r\n request.setAttribute(\"page\", page)\r\n return \"/WEB-INF/group/default/group_preparecourseplan_list.ftl\"","sub_path":"WebContent/WEB-INF/py/group_preparecourseplan_list.py","file_name":"group_preparecourseplan_list.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"151634659","text":"import requests\nimport re\nimport json\nimport time\n\n\n\ndef get_html(url,headers=None):\n global TryCount\n req = requests.get(url,headers)\n if req.status_code < 400:\n html_text = req.content.decode()\n TryCount = 0\n return html_text\n else:\n if TryCount < 3:\n TryCount += 1\n get_html(url,headers)\n else:\n return None\n\ndef get_html_by_proxy(url,headers=None,proxies=None):\n global TryCount\n # print(proxies)\n req = requests.get(url, headers,proxies=proxies)\n # print(req.text)\n if req.status_code < 400:\n html_text = req.content.decode()\n TryCount = 0\n return html_text\n else:\n print(req.text)\n if TryCount < 3:\n TryCount += 1\n get_html_by_proxy(url, headers,proxies=proxies)\n else:\n return None\n\n\ndef get_local_ip():\n headers = {\n 'Accept': 
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'User-Agent': 'Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.37(KHTML,likeGecko)Chrome/69.0.3497.81Safari/537.36',\n 'Host': 'www.ip138.com'\n }\n url = 'http://200019.ip138.com/'\n html_text = get_html(url,headers=headers)\n ip = re.findall('您的IP地址是:\\[(.*?)\\]',html_text)\n if ip:\n ip = ip[0]\n return ip\n else:\n return None\n\n\ndef add_white_ip():\n white_ip = get_local_ip()\n if white_ip:\n url = 'http://web.http.cnapi.cc/index/index/save_white?neek=76707&appkey=d30956ff015f29d294559648dd1818fe&white=%s' % white_ip\n requests.get(url)\n print('已经将IP %s,添加到白名!'% white_ip)\n else:\n print('没有获取到本地IP地址,添加白名单失败!')\n pass\n\ndef get_proxy():\n global ProxyTime\n global Proxies\n # print(time.time()-ProxyTime)\n if ProxyTime ==0 or time.time()-ProxyTime >= 300:\n url = 'http://http.tiqu.alicdns.com/getip3?num=1&type=2&pro=&city=0&yys=0&port=1&pack=59715&ts=0&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions='\n html_text = get_html(url)\n html_json = json.loads(html_text)\n print(html_json)\n if html_json['code'] == 0:\n ip = html_json['data'][0]['ip']\n port = html_json['data'][0]['port']\n proxyMetaHttp = \"http://%(host)s:%(port)s\" % {\n \"host\": ip,\n \"port\": port,\n }\n proxyMetaHttps = \"http://%(host)s:%(port)s\" % {\n \"host\": ip,\n \"port\": port,\n }\n Proxies = {\n \"http\": proxyMetaHttp,\n # \"https\":proxyMetaHttps\n }\n ProxyTime = time.time() # was getProxyTime, a dead local that never refreshed the cache timestamp\n return Proxies\n else:\n # the cached proxy is still fresh (under 300 s old), so reuse it\n return Proxies\n\n\n\n\n\n\n# configuration\nProxyTime = 0\nProxies = ''\nsetTryCount = 3\nTryCount = 0\n\n\n# add this machine's IP to the Zhima HTTP proxy whitelist\nadd_white_ip()\n\n\n\nif __name__ == '__main__':\n # fetch through the proxy\n url = 'http://200019.ip138.com/'\n proxies = get_proxy()\n print(proxies)\n print(get_html_by_proxy(url,proxies=Proxies))\n\n","sub_path":"tools/Crawler/ip_proxy_zhima.py","file_name":"ip_proxy_zhima.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"456897423","text":"import tensorflow as tf\r\nimport tensorflow_probability as tfp\r\nimport numpy as np\r\nimport gpflow as gp\r\nfrom gpflow.utilities import print_summary\r\n\r\n\r\ndef logistic_bjt(lower, upper):\r\n '''define logistic bijector'''\r\n low= tf.constant(lower, dtype= tf.float64); high= tf.constant(upper, dtype= tf.float64)\r\n affine = tfp.bijectors.AffineScalar(shift=low.numpy(), scale=(high.numpy() - low.numpy()))\r\n sigmoid = tfp.bijectors.Sigmoid()\r\n logistic = tfp.bijectors.Chain([affine, sigmoid])\r\n return logistic\r\n\r\ndef set_and_optimize_gp_model(optimize, D, Xt, Yt, Yt_cost, noise, noise_cost, kernel, latent_cost_kernel, logistic, logistic_noise):\r\n\r\n if optimize:\r\n\r\n kernel= gp.kernels.RBF(lengthscales= np.array([1]*D))\r\n latent_cost_kernel= gp.kernels.RBF(lengthscales= np.array([1]*D))\r\n\r\n model = gp.models.GPR((Xt, Yt), kernel=kernel)\r\n '''set hyperparameter constraints'''\r\n model.kernel.lengthscales = gp.Parameter(model.kernel.lengthscales.numpy(), transform=logistic)\r\n model.kernel.variance = gp.Parameter(model.kernel.variance.numpy(), transform=logistic)\r\n model.likelihood.variance = gp.Parameter(model.likelihood.variance.numpy(), transform=logistic_noise)\r\n\r\n opt_obj = gp.optimizers.Scipy()\r\n opt_obj.minimize(model.training_loss, model.trainable_variables, options=dict(maxiter=100))\r\n\r\n\r\n log_Yt_cost = np.log(Yt_cost)\r\n latent_cost_model = 
gp.models.GPR((Xt, log_Yt_cost), latent_cost_kernel)\r\n '''set hyperparameter constraints'''\r\n latent_cost_model.kernel.lengthscales = gp.Parameter(latent_cost_model.kernel.lengthscales.numpy(), transform=logistic)\r\n latent_cost_model.kernel.variance = gp.Parameter(latent_cost_model.kernel.variance.numpy(), transform=logistic)\r\n latent_cost_model.likelihood.variance = gp.Parameter(latent_cost_model.likelihood.variance.numpy(), transform=logistic_noise)\r\n\r\n opt_cost = gp.optimizers.Scipy()\r\n opt_cost.minimize(latent_cost_model.training_loss, latent_cost_model.trainable_variables, options=dict(maxiter=100))\r\n\r\n noise= model.likelihood.variance.numpy()\r\n noise_cost= latent_cost_model.likelihood.variance.numpy()\r\n\r\n print('printing objective model summary')\r\n print_summary(model)\r\n print('lengthscale values of objective model: ',model.kernel.lengthscales)\r\n print('printing latent cost model summary')\r\n print_summary(latent_cost_model)\r\n print('lengthscale values of latent cost model: ',model.kernel.lengthscales)\r\n\r\n\r\n else:\r\n\r\n model = gp.models.GPR((Xt, Yt), kernel=kernel)\r\n model.likelihood.variance.assign(noise)\r\n\r\n log_Yt_cost = np.log(Yt_cost)\r\n latent_cost_model = gp.models.GPR((Xt, log_Yt_cost), latent_cost_kernel)\r\n latent_cost_model.likelihood.variance.assign(noise_cost)\r\n\r\n return model, latent_cost_model, noise, noise_cost, log_Yt_cost, kernel, latent_cost_kernel\r\n","sub_path":"bo_cost_budget_cont_domain/hyperparameter_optimization.py","file_name":"hyperparameter_optimization.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"580736394","text":"from lib.preprocessing.openlibrary import (\n OL_txt_to_csv,\n OL_preprocess_raw_dataset,\n OL_join_authors_books,\n)\nfrom lib.preprocessing.feltrinelli import (\n preprocessing_feltrinelli,\n join_fltr_ol,\n fltr_RL_ol,\n merge_enriched_books,\n)\nfrom lib.scraping.scrape import (\n scrape_from_feltrinelli,\n enr_from_mondadori,\n enr_from_hoepli,\n)\nfrom lib.utility.lib import load_to_hdfs\nfrom lib.utility.data_analysis import analysis_data\nfrom dask.distributed import Client\nimport dask.dataframe as dd\nimport dask.bag as db\nimport config as cfg\nimport os\nimport pandas as pd\nimport dask\n\n\ndef main(\n config,\n scrape_feltrinelli=False,\n prep_feltrinelli=False,\n txt_to_csv_books=False,\n txt_to_csv_authors=False,\n join_authors_books=False,\n enrich_fltr=False,\n redefine_fltr=False,\n):\n \"\"\"Handler of the entire pipeline.\n\n Args:\n config (config): Object which cointains all the configurations of the project.\n scrape_feltrinelli (bool, optional): Set True for scraping books catalog from Feltrinelli. Defaults to False.\n txt_to_csv_books (bool, optional): Set True for converting OL books txt to CSV. Defaults to False.\n txt_to_csv_authors (bool, optional): Set True for converting OL authors txt to CSV. Defaults to False.\n join_authors_books (bool, optional): Set True to enrich OL's books catalog with OL's authors catalog. 
Defaults to False.\n enrich_fltr (bool, optional): Set True to enrich Feltrinelli's catalogue with Open Library, Mondadori and Hoepli's books catalogue\n \"\"\"\n if scrape_feltrinelli:\n scrape_from_feltrinelli(\n cfg.scraping_fltr[\"books_for_page\"],\n cfg.scraping_fltr[\"timeout\"],\n cfg.scraping_fltr[\"path_output\"],\n cfg.scraping_fltr[\"name_file_out\"],\n )\n if prep_feltrinelli:\n preprocessing_feltrinelli(\n cfg.preprocessing_fltr[\"input_file_path\"],\n cfg.preprocessing_fltr[\"path_file_out\"],\n cfg.preprocessing_fltr[\"name_file_out\"],\n )\n if txt_to_csv_books:\n OL_txt_to_csv(\n cfg.books[\"path_input_books\"],\n cfg.books[\"path_splitted_catalog\"],\n cfg.books[\"path_full_catalog\"],\n cfg.books[\"name_full_books_catalog\"],\n cfg.books[\"books_cols\"],\n cfg.books[\"dtypes\"],\n cfg.books[\"books_chunksize\"],\n )\n if txt_to_csv_authors:\n OL_txt_to_csv(\n cfg.authors[\"path_input_authors\"],\n cfg.authors[\"path_splitted_catalog\"],\n cfg.authors[\"path_full_catalog\"],\n cfg.authors[\"name_full_authors_catalog\"],\n cfg.authors[\"authors_cols\"],\n cfg.authors[\"dtypes\"],\n cfg.authors[\"authors_chunksize\"],\n )\n if join_authors_books:\n authors_clean = OL_preprocess_raw_dataset(\n cfg.prp_raw_authors[\"raw_ol_authors_path\"],\n cfg.prp_raw_authors[\"dtypes_auth\"],\n )\n books_clean = OL_preprocess_raw_dataset(\n cfg.prp_raw_books[\"raw_ol_books_path\"],\n cfg.prp_raw_books[\"dtypes_books\"],\n )\n OL_join_authors_books(\n books_clean,\n authors_clean,\n cfg.ol_join[\"output_file_out\"],\n cfg.ol_join[\"name_csv_out\"],\n cfg.ol_join[\"join_mode\"],\n cfg.ol_join[\"join_key\"],\n )\n if enrich_fltr:\n fltr_enr_ol_join, fltr_no_joined = join_fltr_ol(\n cfg.enrich_fltr[\"fltr_cleaned_path\"],\n cfg.enrich_fltr[\"ol_joined_auth_book_path\"],\n cfg.enrich_fltr[\"fltr_left_enr_path_out\"],\n cfg.enrich_fltr[\"fltr_left_enr_name_file_out\"],\n )\n fltr_enr_ol_RL = fltr_RL_ol(\n fltr_no_joined, cfg.enrich_fltr[\"ol_joined_auth_book_path\"]\n )\n fltr_ol_final_enr = merge_enriched_books(\n fltr_enr_ol_join,\n fltr_enr_ol_RL,\n os.path.join(\n cfg.preprocessing_fltr[\"path_file_out\"],\n cfg.preprocessing_fltr[\"name_file_out\"],\n ),\n )\n fltr_enr_mondadori = enr_from_mondadori(\n fltr_ol_final_enr,\n cfg.enrich_mondadori[\"path_output\"],\n cfg.enrich_mondadori[\"name_file_out\"],\n )\n final_hoepli = enr_from_hoepli(\n fltr_enr_mondadori,\n cfg.enrich_hoepli[\"path_output\"],\n cfg.enrich_hoepli[\"name_file_out\"],\n cfg.enrich_hoepli[\"timeout\"],\n )\n if redefine_fltr:\n analysis_data(\n cfg.data_analysis[\"data_set_path\"],\n cfg.data_analysis[\"corrected_year_path\"],\n cfg.data_analysis[\"corrected_category_path\"],\n )\n\n\nif __name__ == \"__main__\":\n # START LOCAL DASK'S CLUSTER, WEB INTERFACE --> http://127.0.0.1:8787/status\n client = Client(n_workers=2, threads_per_worker=2, memory_limit=\"2GB\")\n print(f\"DASK'S CLUSTER INITIALIZE...\\n{client}\")\n main(\n cfg,\n scrape_feltrinelli=False,\n prep_feltrinelli=False,\n txt_to_csv_books=False,\n txt_to_csv_authors=False,\n join_authors_books=False,\n enrich_fltr=False,\n redefine_fltr=False,\n )\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"597953268","text":"\n\n\n\nclass UnBindCard(object):\n\n URI = '/online-quick-api-test/unbundled/unbundledCard'\n\n def __init__(self, args={}):\n self.platformCuId = args.get('merId')\n self.merKey = args.get('merKey')\n 
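# Note: the mutable default args={} is shared across calls; it is only\n # read via .get() here, so it is harmless, but args=None plus\n # args = args or {} inside __init__ would be the safer sketch.\n 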
self.customerOrderId = args.get('customerOrderId')\n\n\n\n#import json\n#print(json.dumps(BindCardQueryAll('8619133603','sdjp2p2014169'), default=lambda obj: obj.__dict__))\n","sub_path":"projects/djtest/sdj/test/model/unbindCard.py","file_name":"unbindCard.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"184191790","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 18 22:19:40 2020\n\n@author: Project-C\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 16 15:52:12 2020\n\n@author: Project-C\n\"\"\"\n\n\nfrom os.path import join\nimport csv\nimport math\nimport pandas as pd\nfrom KalmanFilter import KalmanFilter\nfrom KalmanFilter2 import KalmanFilter as KF1D\nimport numpy as np\nimport statistics\ndef loadDataset(name):\n data = []\n with open(name, newline='') as f:\n reader = csv.reader(f)\n reader = list(reader)\n data = reader.copy()\n return data\n\n\ndef getDistance(rssi, a, n):\n return pow(10, -1*((rssi-a)/(10*n)))\n\n\ndef getStatistic(acc):\n dictElement = {}\n for item in acc:\n if item not in dictElement:\n dictElement[item] = 1\n else:\n dictElement[item] += 1\n return dictElement\n\ndef trilateration(r1,r2,r3, x1,x2,x3, y1,y2,y3):\n A = 2*x2 - 2*x1;\n B = 2*y2 - 2*y1;\n C = r1*r1 - r2*r2 - x1*x1 + x2*x2 - y1*y1 + y2*y2;\n D = 2*x3 - 2*x2;\n E = 2*y3 - 2*y2;\n F = r2*r2 - r3*r3 - x2*x2 + x3*x3 - y2*y2 + y3*y3;\n x = (C*E - F*B) / (E*A - B*D);\n y = (C*D - A*F) / (B*D - A*E);\n return x,y\n\ndef avg(lists):\n return sum(lists) / len(lists)\n\ndef avgCol(lists):\n cols = [[] for i in range (len(lists[0])-1)]\n for item in lists:\n for atom in range(1, len(item)):\n cols[atom-1].append(item[atom])\n \n avgs = []\n for item in range(len(cols)):\n avgs.append(avg(cols[item]))\n return avgs\n \ndef cleanNfold (bssid, data, fold, classes):\n clean = []\n for item in data:\n temp = [[] for i in range(len(bssid) +1)]\n for atom in range(len(item)): \n temp[0] = item[0]\n for itemB in range(len(bssid)):\n if item[atom] == bssid[itemB]:\n temp[itemB+1] = int(item[atom+1])\n clean.append(temp)\n \n separated = [[] for i in range(len(classes))]\n for item in clean:\n for itemB in range(len(classes)):\n if (item[0] == classes[itemB]):\n separated[itemB].append(item)\n \n folded = [[]for i in range(len(classes))]\n sep_c = 0\n for item in separated:\n for i in range (int(len(item) / fold)):\n avg_c = avgCol(item[i * fold: (i+1) * fold])\n avg_c.insert(0, item[0][0])\n folded[sep_c].append(avg_c)\n sep_c+=1\n \n return folded\n\n\ndef cleanNfold2 (bssid, data, fold, classes):\n clean = []\n for item in data:\n temp = [[] for i in range(len(bssid) +1)]\n for atom in range(len(item)): \n temp[0] = item[0]\n for itemB in range(len(bssid)):\n if item[atom] == bssid[itemB]:\n temp[itemB+1] = int(item[atom+1])\n clean.append(temp)\n \n separated = [[] for i in range(len(classes))]\n for item in clean:\n for itemB in range(len(classes)):\n if (item[0] == classes[itemB]):\n separated[itemB].append(item)\n \n folded = [[]for i in range(len(classes))]\n sep_c = 0\n for item in separated:\n for i in range (int(len(item) / fold)):\n aitem = item[i * fold: (i+1) * fold]\n for k in aitem:\n KF = [ KF1D(R= 1, Q= 1) for i in range(len(k[1:]))]\n for j in range(len(k[1:])):\n KF[j].filter(k[j+1])\n avg=[k[0]]\n for item_KF in KF:\n avg.append(item_KF.lastMeasurement())\n \n folded[sep_c].append(avg)\n sep_c+=1\n \n return folded\n\n\ndef euclidDist(v1, v2):\n dist = 0\n for item in 
range(len(v1)):\n dist += (v1[item] - v2[item]) **2\n return math.sqrt(dist)\n\ndef drawRoomOnly():\n \n #drawing the room\n for item in range(len(x)): #drawing the AP\n plt.scatter(x[item], y[item], s = 20, color = \"k\",)\n plt.annotate(\"AP{}-({},{})\".format(item, x[item], y[item]), (x[item] -10, y[item] + 10))\n \n lines = [[[ 0, 0], [600, 0]], #room lines\n [[600, 0], [600, 400]],\n [[600, 400], [300, 400]],\n [[300, 400], [300, 300]],\n [[300, 300], [ 0, 300]],\n [[ 0, 300], [ 0, 0]],\n ]\n for item in lines: #drawing room lines\n plt.plot([item[0][0], item[1][0]], [item[0][1], item[1][1]], color = \"k\")\n \n color = [\"gold\", \"navy\", \"slategrey\", \"springgreen\", \"orangered\", \"aqua\"] \n for item in range(len(trueLoc)):#Drawing the point\n plt.scatter(trueLoc[item][0],trueLoc[item][1], color = color[item])\n plt.annotate(\"{}-({},{})\".format(label_point[item], \n trueLoc[item][0],trueLoc[item][1]), \n (trueLoc[item][0] - 50,trueLoc[item][1] + 10))\n \ndef drawRoom(scattered, title):\n drawRoomOnly()\n color = [\"gold\", \"navy\", \"slategrey\", \"springgreen\", \"orangered\", \"aqua\"] \n est = scattered\n est =np.transpose(est).tolist()\n for item in est:#Drawing the estimated points\n\n plt.scatter(float(item[1]), float(item[2]), \n color = color[ord(item[0]) - ord('a')], \n alpha = \"0.5\",\n s = 2) \n plt.title(title)\n plt.savefig(join(join(\"output\", \"plot\"), title + \"-room accuracy\"))\n plt.show()\n\nif __name__ == \"__main__\":\n \"\"\"\n ==================================\n loading constant variables\n ==================================\n \"\"\"\n \n data = loadDataset(join(\"dataset\", \"all at 16-06-20-15 00.csv\"))\n constant =loadDataset(join(join(join(\"..\",\"Getting constant variables\"),\"output\"), \"constant variables.csv\"))\n \n KF = KalmanFilter(dt = 1, u_x = 1 ,u_y = 1, std_acc = 200 , x_std_meas = 50, y_std_meas = 50 )\n a = []\n n = []\n x = []\n y = []\n bssid = []\n for item in constant[1:]:\n bssid.append(item[1])\n a.append(float(item[2]))\n n.append(float(item[3]))\n x.append(float(item[4]))\n y.append(float(item[5]))\n \n trueLocX = {'a' : 100,\n 'b' : 300,\n 'c' : 500,\n 'd' : 500,\n 'e' : 300,\n 'f' : 100}\n \n trueLocY = {'a' : 100,\n 'b' : 100,\n 'c' : 100,\n 'd' : 300,\n 'e' : 300,\n 'f' : 300}\n \n trueLoc = [ [100, 100], [300,100],\n [500, 100], [500,300],\n [300, 300], [100,300],]\n \n trueDist = [[] for i in range(len(x))]\n for item in range(len(x)):\n d = []\n for atom in trueLoc:\n trueDist[item].append(euclidDist( [x[item], y[item]] , atom))\n \n label_point = list(trueLocX.keys())\n \n \"\"\"\n ==================================\n Measuring distance and position\n ==================================\n \"\"\"\n #average 100 data to one data\n data = cleanNfold2(bssid, data, fold = 100, classes = ['a', 'b', 'c', 'd', 'e', 'f'])\n \n column = [\"label\", \"rssi1\", \"rssi2\", \"rssi3\", \n \"dist1\", \"dist2\", \"dist3\",\n \"estimated x\", \"estimated y\",\n \"predicted x\", \"predicted y\",\n \"measured x\", \"measured y\"\n ]\n column_acc = [\"label\", \n \"acc dist1\", \"acc dist2\", \"acc dist3\",\n \"acc estimated x\", \"acc estimated y\", \"acc estimated\",\n \"acc predicted x\", \"acc predicted y\", \"acc predicted\",\n \"acc measured x\", \"acc measured y\", \"acc measured\",\n ]\n \n routes = []\n routes_acc = []\n point_dict = {}\n point_acc_dict = {}\n for item in label_point:\n point_dict[item] = []\n point_acc_dict[item] = []\n \n started = False\n for item in range(len(data[0])):\n for itemB in 
range(len(data)):\n \n true_x = trueLocX[data[itemB][item][0]]\n true_y = trueLocY[data[itemB][item][0]]\n point = []\n point_acc= [] \n \n point.append(data[itemB][item][0]) #label\n point_acc.append(data[itemB][item][0]) #label\n \n for i in range (1, len(data[itemB][item])):\n point.append(data[itemB][item][i]) #rssi\n \n \"\"\"\n ------------\n Estimating distance\n ---------------------------\n \"\"\"\n for i in range (1, len(data[itemB][item])):\n d = getDistance(data[itemB][item][i], a[i-1], n[i-1]) * 100\n point.append(d)\n point_acc.append(abs(d - trueDist[i-1][ itemB])) #distance\n \n \n \"\"\"\n ------------\n Estimating Location\n ---------------------------\n \"\"\"\n #trilateration\n est_x, est_y = trilateration(point[4], point[5], point[6], x[0],x[1],x[2], y[0],y[1],y[2])\n acc_x = abs(est_x - true_x)\n acc_y = abs(est_y - true_y)\n if (started == False):\n KF.start_point(est_x, est_y)\n started = True\n #kalman filtering\n pred = KF.predict().tolist()\n upd = KF.update([[est_x], [est_y]]).tolist()\n \n pred_x = pred[0][0]\n pred_y = pred[1][0]\n upd_x = upd[0][0]\n upd_y = upd[1][0]\n \n #calculating accuracy\n acc_pred_x = abs(pred_x -true_x)\n acc_pred_y = abs(pred_y -true_y)\n acc_upd_x = abs(upd_x -true_x)\n acc_upd_y = abs(upd_y -true_y)\n \n \"\"\"\n ----------\n Updating the frame\n --------------------------\n \"\"\"\n \n point.append(est_x) #estimation x coordinate by trilateration\n point.append(est_y) #estimation y coordinate by trilateration\n point.append(pred_x) #prediction x coordinate by kalman filter\n point.append(pred_y) #prediction y coordinate by kalman filter\n point.append(upd_x) #measured x coordinate by kalman filter\n point.append(upd_y) #measured x coordinate by kalman filter\n \n point_acc.append(acc_x) #accuracy x coordinate by trilateration\n point_acc.append(acc_y) #accuracy x coordinate by trilateration\n point_acc.append(euclidDist([est_x,est_y], [true_x,true_y])) #accuracy overall coordinate by trilateration\n \n point_acc.append(acc_pred_x) #accuracy x coordinate by kalman filter predicted coordinate\n point_acc.append(acc_pred_y) #accuracy y coordinate by kalman filter predicted coordinate\n point_acc.append(euclidDist([pred_x,pred_y], [true_x,true_y])) #accuracy coordinate by kalman filter predicted coordinate\n \n point_acc.append(acc_upd_x) #accuracy x coordinate by kalman filter measured coordinate\n point_acc.append(acc_upd_y) #accuracy x coordinate by kalman filter measured coordinate\n point_acc.append(euclidDist([upd_x,upd_y], [true_x,true_y])) #accuracy coordinate by kalman filter measured coordinate\n \n routes.append(point)\n routes_acc.append(point_acc)\n point_dict[point[0]].append(point)\n point_acc_dict[point[0]].append(point_acc)\n \n route_frame = pd.DataFrame(columns = column, data = routes)\n route_acc_frame = pd.DataFrame(columns = column_acc, data = routes_acc)\n \n for item in label_point:\n point_dict[item] = pd.DataFrame(columns = column, data = point_dict[item])\n point_acc_dict[item] = pd.DataFrame(columns = column_acc, data = point_acc_dict[item])\n \n \n \"\"\"\n ==================================\n Creating summary\n ==================================\n \"\"\"\n column_summary= [\"label\", \"acc x\", \"acc y\", \"acc\"]\n data = [[\"acc_estimated\" , avg(route_acc_frame['acc estimated x']),\n avg(route_acc_frame['acc estimated y']) , avg(route_acc_frame['acc estimated'])],\n \n [\"acc_predicted\", avg(route_acc_frame['acc predicted x']),\n avg(route_acc_frame['acc predicted y']) , avg(route_acc_frame['acc 
predicted'])],\n \n [\"acc_measured\",avg(route_acc_frame['acc measured x']),\n avg(route_acc_frame['acc measured y']) , avg(route_acc_frame['acc measured'])],\n ]\n \n summary_frame = pd.DataFrame(columns = column_summary, data = data)\n \n data_per_point = []\n for item in label_point:\n data_now = [item, \n avg(point_acc_dict[item][\"acc dist1\"]),\n avg(point_acc_dict[item][\"acc dist2\"]),\n avg(point_acc_dict[item][\"acc dist3\"]),\n \n avg(point_acc_dict[item][\"acc estimated x\"]),\n avg(point_acc_dict[item][\"acc estimated y\"]),\n avg(point_acc_dict[item][\"acc estimated\"]),\n \n avg(point_acc_dict[item][\"acc predicted x\"]),\n avg(point_acc_dict[item][\"acc predicted y\"]),\n avg(point_acc_dict[item][\"acc predicted\"]),\n \n avg(point_acc_dict[item][\"acc measured x\"]),\n avg(point_acc_dict[item][\"acc measured y\"]),\n avg(point_acc_dict[item][\"acc measured\"]),\n ]\n data_per_point.append(data_now)\n \n summary_per_point_frame = pd.DataFrame(columns = column_acc, data = data_per_point)\n \n \n \n \n \"\"\"\n ==================================\n Visualizing\n ==================================\n \"\"\"\n \n \"\"\"\n ------------\n visualizing room\n ---------------------------\n \"\"\"\n \n \n import matplotlib.pyplot as plt\n \n est = [route_frame[\"label\"].values.tolist(), \n route_frame[\"estimated x\"].values.tolist(), \n route_frame[\"estimated y\"].values.tolist()]\n drawRoom(est, \"Estimated\")\n \n \n est = [route_frame[\"label\"].values.tolist(), \n route_frame[\"predicted x\"].values.tolist(), \n route_frame[\"predicted y\"].values.tolist()]\n drawRoom(est, \"Predicted \")\n \n est = [route_frame[\"label\"].values.tolist(), \n route_frame[\"measured x\"].values.tolist(), \n route_frame[\"measured y\"].values.tolist()]\n drawRoom(est, \"Measured \")\n \n \n\n \"\"\"\n ------------\n visualizing walk\n ---------------------------\n \"\"\"\n \n walk = route_frame.values.tolist()[:7]\n \n \n walk = np.transpose(walk).tolist()\n \n drawRoomOnly()\n plt.plot( [float(i) for i in walk[7]], \n [float(i) for i in walk[8]], \n color=\"blue\", alpha=(0.4), label= \"Hasil trilaterasi\")\n plt.legend()\n plt.scatter([float(i) for i in walk[7]], \n [float(i) for i in walk[8]], \n color=\"blue\", alpha=(0.4), s = 30)\n \n plt.plot([float(i) for i in walk[11]] ,\n [float(i) for i in walk[12]], color=\"red\", alpha=(0.4), label= \"Koreksi oleh kalman filter\")\n plt.legend()\n plt.scatter([float(i) for i in walk[11]] ,\n [float(i) for i in walk[12]], color=\"red\", alpha=(0.4), s = 30)\n plt.show()\n \n \"\"\"\n ------------\n visualizing overall accuracy \n ---------------------------\n \"\"\"\n \n x_frame = list(range(len(route_acc_frame['acc estimated'])))\n \n \n plt.plot(x_frame, route_acc_frame['acc estimated'], color = \"r\", label= \"estimated - trilateration\", alpha=0.5)\n #plt.plot(x_frame, route_acc_frame['acc predicted'], color = \"g\", label= \"predicted - kalman filter\", alpha=0.5)\n plt.plot(x_frame, route_acc_frame['acc measured'], color = \"b\", label= \"measured- kalman filter\", alpha=0.5)\n plt.legend()\n plt.title (\"Accuracy\")\n \n plt.savefig(join(join(\"output\", \"plot\"), \"Overall accuracy\"))\n plt.show()\n \n \"\"\"\n ==================================\n Printing\n ==================================\n \"\"\"\n route_acc_frame.to_csv(join(join(\"output\", \"acc coord\"), \"route_acc.csv\"), \n index=False, header = True)\n route_frame.to_csv(join(join(\"output\", \"acc coord\"), \"route.csv\"), \n index=False, header = True)\n \n 
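# os.path.join accepts any number of parts, so join('output', 'acc coord', 'route.csv')\n # in one call is an equivalent, flatter sketch of these nested join(join(...)) calls.\n 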
summary_frame.to_csv(join(join(\"output\", \"acc coord\"), \"summary.csv\"), \n index=False, header = True)\n \n summary_per_point_frame.to_csv(join(join(\"output\", \"acc coord\"), \"summary_per_point.csv\"), \n index=False, header = True)\n ","sub_path":"Getting the constant variables/tes.py","file_name":"tes.py","file_ext":"py","file_size_in_byte":16561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"629070596","text":"import os\nimport jsonpickle as jsp\n\njsp.set_encoder_options('json',indent=4, separators=(',',':'), ensure_ascii=False)\n\nclass File_IO:\n def __init__(self,filename):\n self.filename = filename\n\n\n def write(self, value):\n file = open(self.filename,'w',encoding='utf-8')\n file.write(value)\n file.close()\n \n def read(self):\n file = open(self.filename, 'r', encoding='utf-8')\n lines = file.read()\n file.close()\n return lines\n\n\nclass School:\n def __init__(self, name, adderess):\n self.__name = name\n self.__address = adderess\n self.__students = []\n\n @property\n def name(self):\n return self.__name\n \n @name.setter\n def name(self, name):\n self.__name = name\n\n @property \n def address(self):\n return self.__address\n \n @address.setter\n def address(self,address):\n self.__address = address\n\n @property\n def count_students(self):\n print(type(self.__students))\n return len(self.__students)\n\n def add_student(self,student):\n self.__students.append(student)\n\n def delete_student(self, index):\n print('Вы действительно хотите удалить {}?'.format(self.__students[index].name))\n confirm = input()\n if (confirm.lower() == 'y'):\n self.__students.pop(index)\n\n\n def get_all_students(self):\n return self.__students\n \nclass Student():\n def __init__(self,name, age, class_number):\n self.__name, self.__age, self.__class_number = name, age, class_number\n\n @property\n def name(self):\n return self.__name\n\n @property\n def age(self):\n return self.__age\n\n @age.setter\n def age(self,age):\n self.__age = age\n\n @property\n def class_number(self):\n return self.__class_number\n\n @class_number.setter\n def class_number(self, class_number):\n self.__class_number = class_number\n\nclass Storage():\n def __init__(self,path):\n self.__storage = []\n self.__path = path\n self.__file = File_IO(self.__path)\n if (os.path.exists(self.__path)):\n self.__storage = jsp.decode(self.__file.read())\n\n def get_all(self):\n return self.__storage\n\n def count(self):\n return len(self.__storage)\n\n def add(self, item):\n self.__storage.append(item)\n self.save()\n\n def edit(self, index,value):\n self.__storage[index] = value\n self.save()\n\n def remove(self,n):\n self.__storage.pop(n)\n self.save()\n\n def save(self):\n self.__file.write(jsp.encode(self.__storage))\n\nclass SchoolsStorage(Storage):\n def __init__(self):\n path = 'schools.json'\n super().__init__(path)\n\n\nschools_storage = SchoolsStorage()\n\ndef print_schools():\n schools = schools_storage.get_all()\n print('{3:^5}|{0:^15}|{1:^30}|{2:^15}'.format('Название','Адрес','Кол-во учеников','#'))\n for i,line in enumerate(schools):\n #name, count, result = line\n print('{3:^5}|{0:<15}|{1:^30}|{2:^15}'.format(line.name,line.address,line.count_students, i+1))\n\ndef print_students(school):\n print('Школа {}'.format(school.name))\n print('{0:5}|{1:^15}|{2:^10}|{3:^15}'.format('#','ФИО','Класс','Возраст'))\n for i, line in enumerate(school.get_all_students()):\n #name, count, result = line\n 
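# format-spec reminder: '{1:^15}' centres field 1 in a 15-column slot,\n # while '<' and '>' would left- and right-align (see the header row above).\n 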
print('{0:5}|{1:^15}|{2:^10}|{3:^15}'.format(i+1,line.name,line.class_number,line.age))\n\ndef delete_student():\n print_schools()\n while True:\n school_number = input('Введите номер школы ')\n if (school_number.isdigit() and int(school_number)<=schools_storage.count()):\n school_number_i = int(school_number)\n break\n \n\n print_students(schools_storage.get_all()[school_number_i-1])\n \n while True:\n student_number = input('Введите номер ученика ')\n if (student_number.isdigit() and int(student_number)<=schools_storage.get_all()[school_number_i-1].count_students):\n student_number_i = int(student_number)\n break\n schools_storage.get_all()[school_number_i-1].delete_student(student_number_i-1)\n\n schools_storage.save()\n\ndef add_student():\n print_schools()\n while True:\n school_number = input('Введите номер школы ')\n if (school_number.isdigit() and int(school_number)<=schools_storage.count()):\n school_number_i = int(school_number)\n break\n \n while True:\n name = input('Имя ')\n if (len(name)>3):\n break\n else:\n print('Слишком короткое имя')\n \n while True:\n input_class_number = input('Класс ')\n if (input_class_number.isdigit() and int(input_class_number) in range(1,12)):\n class_number = int(input_class_number)\n break\n else:\n print('Не верный ввод')\n \n while True:\n input_age = input('Возраст ')\n if (input_age.isdigit() and int(input_age) in range(6,20)):\n age = int(input_age)\n break\n else:\n print('Не верный ввод')\n\n student = Student(name, age, class_number)\n schools_storage.get_all()[school_number_i-1].add_student(student)\n schools_storage.save()\n\ndef add_school():\n while True:\n name = input('Название ')\n if (len(name)>3):\n break\n else:\n print('Слишком короткое название')\n \n while True:\n address = input('Адрес ')\n if (len(address)>3):\n break\n else:\n print('Слишком короткий адрес')\n \n school = School(name, address)\n schools_storage.add(school)\n \ndef edit_school():\n print_schools()\n while True:\n school_number = input('Введите номер школы ')\n if (school_number.isdigit() and int(school_number)<=schools_storage.count()):\n school_number_i = int(school_number)\n break\n \n old_school = schools_storage.get_all()[school_number_i-1]\n while True:\n name = input('Название enter - оставить {} '.format(old_school.name))\n if (len(name)>3):\n break\n else:\n print('Слишком короткое название')\n \n while True:\n address = input('Адрес enter - оставить {} '.format(old_school.address))\n if (len(address)>3):\n break\n else:\n print('Слишком короткий адрес')\n \n school = School(name, address)\n schools_storage.edit(school_number_i-1, school)\n\n\n\ndef menu():\n print('1 - Добавить школу')\n print('2 - Получить информацию о школе')\n print('3 - Изменить информацию о школе')\n print('4 - Просмотреть учеников школы')\n print('5 - Добавить ученика в школу')\n print('6 - Исключить ученика из школы')\n print('0 - Выход')\n while True:\n user_input = input(':> ')\n if (user_input.isdigit() and int(user_input) in range(0,7)):\n return int(user_input)\n else:\n print('Команда не распознана')\n\n\nwhile True:\n user_answer = menu()\n\n if (user_answer == 1):\n add_school()\n elif (user_answer == 2):\n print_schools()\n elif (user_answer == 3):\n edit_school()\n elif (user_answer == 4):\n print_schools()\n while True:\n school_number = input('Введите номер школы ')\n if (school_number.isdigit() and int(school_number)<=schools_storage.count()):\n school_number_i = int(school_number)\n break\n print_students(schools_storage.get_all()[school_number_i-1])\n elif (user_answer == 5):\n add_student()\n elif (user_answer == 6):\n delete_student()\n elif (user_answer == 0):\n print('Всего доброго!')\n 
break\n\n\n\n","sub_path":"diplom/diplom.py","file_name":"diplom.py","file_ext":"py","file_size_in_byte":8307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"363844360","text":"import pickle\nimport re\nimport torch\nfrom keras.preprocessing.sequence import pad_sequences\nfrom preprocess import SwagExample, convert_examples_to_features\nfrom utils import select_field\nimport os\n\n\ndef word_tokenize(sentence, tokenizer):\n tokens = tokenizer.tokenize(sentence)\n temp = ''\n for word in tokens:\n if word.startswith('##'):\n word = word[2:]\n temp += word\n else:\n temp += ' '\n temp += word\n temp = temp.strip()\n\n return temp.split(' ')\n\n\ndef is_similar(word_1, word_2, stop=False):\n word_1 = simplify(word_1)\n word_2 = simplify(word_2)\n flag = False\n if ('(' in word_1 or '(' in word_2) and not stop:\n _word_1 = re.sub(u\"\\\\(.*?\\\\)|\\\\{.*?}|\\\\[.*?]\", \"\", word_1)\n _word_2 = re.sub(u\"\\\\(.*?\\\\)|\\\\{.*?}|\\\\[.*?]\", \"\", word_2)\n flag = is_similar(_word_1, _word_2, True)\n\n return word_1 == word_2 or \\\n word_1 in word_2 or \\\n word_2 in word_1 or \\\n min_distance(word_1, word_2) <= 2 or \\\n flag\n\n\ndef simplify(word):\n new_word = word.lower().replace('( ', '(').replace(' )', ')').replace(' ', ' ').replace(' ,', ',').strip()\n return new_word\n\n\ndef min_distance(str1, str2):\n matrix = [[i + j for j in range(len(str2) + 1)] for i in range(len(str1) + 1)]\n\n for i in range(1, len(str1) + 1):\n for j in range(1, len(str2) + 1):\n if str1[i - 1] == str2[j - 1]:\n d = 0\n else:\n d = 1\n matrix[i][j] = min(matrix[i - 1][j] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j - 1] + d)\n\n return matrix[len(str1)][len(str2)]\n\n\ndef get_train_src_tar_txt(train_txt_path):\n src = []\n tar_1 = []\n tar_2 = []\n txt = ''\n try:\n txt += open(train_txt_path, 'r').read()\n except:\n txt += open(train_txt_path, 'r', encoding='utf-8').read()\n\n txt = txt.split('\\n\\n')\n for para in txt:\n sentences = para.split('\\n')\n if len(sentences) < 2:\n continue\n for sid, sentence in enumerate(sentences[0:3]):\n if sid == 0:\n src.append(sentence)\n elif sid == 1:\n tar_1.append(sentence)\n elif sid == 2:\n tar_2.append(sentence)\n return src, tar_1, tar_2\n\n\ndef get_test_src_tar_txt(test_txt_path, tokenizer):\n txt = open(test_txt_path, 'r').read()\n # txt = txt.lower()\n txt = txt.split('\\n\\n')\n src = []\n tar_1 = []\n tar_2 = []\n cudics = []\n tars = []\n for para in txt:\n sentences = para.split('\\n')\n src_sentence = ''\n _tars = []\n _cudics = []\n if len(sentences) < 2 or len(sentences[0]) < 3 or len(sentences[1]) < 3:\n continue\n for sid, sentence in enumerate(sentences):\n if sid == 0:\n src.append(sentence)\n else:\n cudic = {}\n sentence = sentence[2:].lower()\n sentence = sentence.replace('].', '] .')\n text = re.sub('\\[[^\\[\\]]*\\]', '', sentence)\n pairs = re.findall('[^\\[\\] ]+\\[[^\\[\\]]+\\]', sentence)\n for pair in pairs:\n pair = re.split('[\\[\\]]', pair)\n cudic[pair[0]] = pair[1]\n words = word_tokenize(text, tokenizer)\n for wid, word in enumerate(words):\n if word in cudic.keys():\n words[wid] = cudic[word]\n new_text = ' '.join(words)\n if sid == 1:\n tar_1.append(new_text)\n elif sid == 2:\n tar_2.append(new_text)\n _tars.append(new_text)\n _cudics.append(cudic)\n tars.append(_tars)\n cudics.append(_cudics)\n\n with open('./data/test_cudics.pkl', 'wb') as f:\n pickle.dump(cudics, f)\n with open('./data/test_tars.pkl', 'wb') as f:\n pickle.dump(tars, f)\n\n return src, tar_1, 
tar_2\n\n\ndef get_one_sample(src_words, key, key_tar, abbrs, max_pad_length, max_dcmn_seq_length, tokenizer):\n if key in abbrs.keys():\n pass\n elif key.upper() in abbrs.keys():\n key = key.upper()\n elif key.lower() in abbrs.keys():\n key = key.lower()\n\n choices = []\n\n if key in abbrs.keys() and key_tar is not None:\n temp = [' '.join(src_words), 'what is {} ?'.format(key)]\n label = -1\n skip_cnt = 0\n for index, u in enumerate(abbrs[key]):\n if index - skip_cnt >= max_pad_length - 2:\n break\n if len(u.split(' ')) > 10:\n skip_cnt += 1\n continue\n temp.append(u)\n choices.append(u)\n if is_similar(u, key_tar):\n label = index - skip_cnt\n while len(temp) < max_pad_length:\n temp.append('[PAD]')\n choices.append('[PAD]')\n\n if len(tokenizer.tokenize(temp[0])) + len(tokenizer.tokenize(temp[1])) + len(\n tokenizer.tokenize(temp[2])) >= max_dcmn_seq_length \\\n or label < 0 or label >= max_pad_length - 2:\n return None, None, None\n else:\n return temp, label, choices\n else:\n # return None, None, None\n temp = [' '.join(src_words), 'what is {} ?'.format(key), key]\n choices.append(key)\n while len(temp) < max_pad_length:\n temp.append('[PAD]')\n choices.append('[PAD]')\n if len(tokenizer.tokenize(temp[0])) + len(tokenizer.tokenize(temp[1])) + len(\n tokenizer.tokenize(temp[2])) >= max_dcmn_seq_length:\n return None, None, None\n return temp, 0, choices\n\n\ndef get_dcmn_data_from_gt(src_words, tar_words, abbrs, max_pad_length, max_dcmn_seq_length, tokenizer):\n if tar_words[-1] != '.':\n tar_words.append('.')\n i = 0\n j = 0\n sentences = []\n labels = []\n key_choices = []\n seq_src_words = src_words[:]\n indics = []\n key_ans = {}\n\n while i < len(src_words):\n if j == len(tar_words):\n break\n if src_words[i] == tar_words[j]:\n i += 1\n j += 1\n else:\n p = i + 1\n q = j + 1\n\n while p < len(src_words):\n while q < len(tar_words) and tar_words[q] != src_words[p]:\n q += 1\n if q == len(tar_words):\n p = p + 1\n q = j + 1\n else:\n break\n aft = \" \".join(tar_words[j:q])\n for k, word in enumerate(src_words[i:p]):\n temp, label, choices = get_one_sample(src_words, word, aft, abbrs, max_pad_length, max_dcmn_seq_length, tokenizer)\n if temp is not None:\n sentences.append(temp)\n labels.append(label)\n key_choices.append(choices)\n key_ans[word] = temp[label + 2]\n seq_src_words[i+k] = '[UNK]'\n indics.extend([j,q])\n\n i = p\n j = q\n\n\n seq_tar_words = []\n for i,word in enumerate(tar_words):\n if i in indics:\n seq_tar_words.append('[MASK]')\n seq_tar_words.append(word)\n\n return sentences, labels, ' '.join(seq_src_words), key_ans, key_choices, ' '.join(seq_tar_words)\n\n\ndef get_dcmn_data_from_step1(src_words, masks, k_a, abbrs, max_pad_length, max_dcmn_seq_length, tokenizer):\n sentences = []\n seq_src_words = src_words[:]\n labels = []\n key_choices = []\n for i, mask in enumerate(masks):\n if mask == 0:\n continue\n key = src_words[i]\n if key in abbrs.keys():\n pass\n elif key.upper() in abbrs.keys():\n key = key.upper()\n elif key.lower() in abbrs.keys():\n key = key.lower()\n\n if key in k_a.keys():\n aft = k_a[key]\n elif key in abbrs.keys() and len(abbrs[key]) == 1:\n aft = abbrs[key][0]\n else:\n aft = None\n temp, label, choices = get_one_sample(src_words, key, aft, abbrs, max_pad_length, max_dcmn_seq_length, tokenizer)\n if temp is not None:\n sentences.append(temp)\n labels.append(label)\n key_choices.append(choices)\n seq_src_words[i] = '[UNK]'\n\n return sentences, labels, ' '.join(seq_src_words), key_choices\n\n\ndef seq_tokenize(input_data, config):\n ids 
= []\n for data in input_data:\n words = config.tokenizer.tokenize(data)\n ids.append(words)\n\n ids = pad_sequences([config.tokenizer.convert_tokens_to_ids(txt) for txt in ids],\n maxlen=config.max_seq_length, dtype=\"long\", value=0,\n truncating=\"post\", padding=\"post\")\n masks = [[float(i != 0.0) for i in ii] for ii in ids]\n\n ids = torch.LongTensor(ids).to(config.seq_device)\n masks = torch.LongTensor(masks).to(config.seq_device)\n\n return ids, masks\n\n\ndef build_dataset(config):\n abbrs_path = './data/abbrs-all-cased.pkl'\n # txt_path = './data/train(12809).txt'\n txt_path = os.path.join(config.data_dir, config.train_file)\n with open(abbrs_path, 'rb') as f:\n abbrs = pickle.load(f)\n src_txt, tar_1_txt, tar_2_txt = get_train_src_tar_txt(txt_path)\n # src_txt = src_txt[:100]\n # tar_1_txt = tar_1_txt[:100]\n # tar_2_txt = tar_2_txt[:100]\n\n seq_srcs = []\n seq_tars = []\n dcmn_srcs = []\n dcmn_labels = []\n key_choices = []\n\n for i, (src, tar) in enumerate(zip(src_txt, tar_1_txt)):\n src = word_tokenize(src, config.tokenizer)\n tar = word_tokenize(tar, config.tokenizer)\n sentences, labels, _src, key_ans, k_c, _tar = get_dcmn_data_from_gt(src, tar, abbrs,\n max_pad_length=config.num_choices + 2,\n max_dcmn_seq_length=config.max_seq_length,\n tokenizer=config.tokenizer)\n if len(sentences) != _src.count('[UNK]'):\n print(i, src, len(sentences))\n dcmn_srcs.extend(sentences)\n dcmn_labels.extend(labels)\n seq_srcs.append(_src)\n seq_tars.append(_tar)\n key_choices.append(k_c)\n\n for i in range(len(seq_srcs)):\n seq_srcs[i] = '[CLS] ' + seq_srcs[i] + ' [SEP]'\n\n q_id = [i + 1 for i in range(len(dcmn_labels))]\n article = [u[0] for u in dcmn_srcs]\n question = [u[1] for u in dcmn_srcs]\n cts = []\n for i in range(config.num_choices):\n cts.append([u[i + 2] for u in dcmn_srcs])\n\n examples = [\n SwagExample(\n swag_id=s5,\n context_sentence=s1,\n start_ending=s2,\n endings=s3,\n label=s4,\n ) for i, (s1, s2, *s3, s4, s5) in\n enumerate(zip(article, question, *cts, dcmn_labels, q_id))\n ]\n\n features = convert_examples_to_features(examples, config.tokenizer, config.max_seq_length)\n input_ids = select_field(features, 'input_ids')\n input_mask = select_field(features, 'input_mask')\n segment_ids = select_field(features, 'segment_ids')\n doc_len = select_field(features, 'doc_len')\n ques_len = select_field(features, 'ques_len')\n option_len = select_field(features, 'option_len')\n labels = [f.label for f in features]\n\n dcmn_contents = []\n for i in range(len(input_ids)):\n dcmn_contents.append((input_ids[i], input_mask[i], segment_ids[i], doc_len[i], ques_len[i], option_len[i], labels[i]))\n\n seq_contents = []\n for i in range(len(seq_srcs)):\n seq_contents.append((seq_srcs[i], seq_tars[i], key_choices[i]))\n\n return seq_contents, dcmn_contents\n\n\ndef build_dataset_eval(config):\n abbrs_path = './data/abbrs-all-cased.pkl'\n # txt_path = './data/test(2030).txt'\n txt_path = os.path.join(config.data_dir, config.test_file)\n\n with open(abbrs_path, 'rb') as f:\n abbrs = pickle.load(f)\n src_txt, tar_1_txt, tar_2_txt = get_test_src_tar_txt(txt_path, config.tokenizer)\n seq_srcs = []\n dcmn_srcs = []\n dcmn_labels = []\n key_choices = []\n\n with open('./data/test_mask_step2_2030.pkl', 'rb') as f:\n mask_step1 = pickle.load(f)\n\n k_as = []\n for i, (src, tar) in enumerate(zip(src_txt, tar_1_txt)):\n src = word_tokenize(src, config.tokenizer)\n tar = word_tokenize(tar, config.tokenizer)\n sentences, labels, _src, key_ans, _, _tar = get_dcmn_data_from_gt(src, tar, abbrs,\n 
max_pad_length=config.num_choices + 2,\n max_dcmn_seq_length=config.max_seq_length,\n tokenizer=config.tokenizer)\n k_as.append(key_ans)\n\n for i, (sts, masks, k_a) in enumerate(zip(src_txt, mask_step1, k_as)):\n sts = word_tokenize(sts, config.tokenizer)\n assert len(sts) == len(masks)\n sentences, labels, _src, k_cs = get_dcmn_data_from_step1(sts, masks, k_a, abbrs,\n max_pad_length=config.num_choices + 2,\n max_dcmn_seq_length=config.max_seq_length,\n tokenizer=config.tokenizer)\n dcmn_srcs.extend(sentences)\n dcmn_labels.extend(labels)\n if len(sentences) != _src.count('[UNK]'):\n print(i, sts)\n seq_srcs.append(_src)\n key_choices.append(k_cs)\n\n for i in range(len(seq_srcs)):\n seq_srcs[i] = '[CLS] ' + seq_srcs[i] + ' [SEP]'\n\n cudics = pickle.load(open('./data/test_cudics.pkl', 'rb'))\n seq_tars = pickle.load(open('./data/test_tars.pkl', 'rb'))\n\n q_id = [i + 1 for i in range(len(dcmn_labels))]\n article = [u[0] for u in dcmn_srcs]\n question = [u[1] for u in dcmn_srcs]\n cts = []\n for i in range(config.num_choices):\n cts.append([u[i + 2] for u in dcmn_srcs])\n\n examples = [\n SwagExample(\n swag_id=s5,\n context_sentence=s1,\n start_ending=s2,\n endings=s3,\n label=s4,\n ) for i, (s1, s2, *s3, s4, s5) in\n enumerate(zip(article, question, *cts, dcmn_labels, q_id))\n ]\n\n features = convert_examples_to_features(examples, config.tokenizer, config.max_seq_length)\n input_ids = select_field(features, 'input_ids')\n input_mask = select_field(features, 'input_mask')\n segment_ids = select_field(features, 'segment_ids')\n doc_len = select_field(features, 'doc_len')\n ques_len = select_field(features, 'ques_len')\n option_len = select_field(features, 'option_len')\n labels = [f.label for f in features]\n\n dcmn_contents = []\n for i in range(len(input_ids)):\n dcmn_contents.append((input_ids[i], input_mask[i], segment_ids[i], doc_len[i], ques_len[i], option_len[i], labels[i]))\n\n seq_contents = []\n for i in range(len(seq_srcs)):\n seq_contents.append((seq_srcs[i], seq_tars[i], cudics[i], key_choices[i]))\n\n return seq_contents, dcmn_contents\n\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":15258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"510416792","text":"import sys\nimport pygame\nimport random\nimport time\nimport ItemDragonBall\nimport ItemNangCap\nimport DauThan\nimport NutBam\nimport QuaiVat\nimport Dan\nimport PhiThuyen\nimport Diem\n\nclass BanPhiThuyen:\n def __init__(self):\n pygame.init()\n self.manhinh = pygame.display.set_mode((1000, 899))\n\n pygame.display.set_caption(r\"Game Dragon Ball Super\")\n\n pygame.mixer.music.load('D:\\BanPhiThuyen\\PicAndMusic\\musicnen.mp3')\n pygame.mixer.music.play(loops=-1)\n pygame.mixer.music.set_volume(0.05)\n self.phithuyen = PhiThuyen.PhiThuyen(self)\n\n self.dan = pygame.sprite.Group()\n self.quaivat = pygame.sprite.Group()\n self.dauthan = pygame.sprite.Group()\n for i in range(self.phithuyen.sophithuyen):\n dauthan = DauThan.DauThan(self)\n dauthan.rect.x = i*dauthan.rect.width\n self.dauthan.add(dauthan)\n\n self.itemnangcap = pygame.sprite.Group()\n itemnangcap = ItemNangCap.ItemNangCap(self)\n self.itemnangcap.add(itemnangcap)\n\n self.itemdragonball = pygame.sprite.Group()\n itemdragonball = ItemDragonBall.ItemDragonBall(self)\n self.itemdragonball.add(itemdragonball)\n\n linkPlay = 'D:\\BanPhiThuyen\\PicAndMusic\\PlayBut.png'\n linkQuit = 'D:\\BanPhiThuyen\\PicAndMusic\\QuitBut.png'\n #linkSetting = 
'D:\\BanPhiThuyen\\PicAndMusic\\SettingBut.png'\n self.nutplay = NutBam.NutBam(self, linkPlay, 430, 200)\n self.nutquit = NutBam.NutBam(self, linkQuit, 430, 350)\n #self.nutsetting = NutBam.NutBam(self, linkSetting, 430, 500)\n\n self.dangchoi = False\n #self.dangsetting = False\n self.scores = 0\n self.speedqv = 3\n self.diem = Diem.Diem(self, f'Scores: {self.scores}')\n\n def capnhatscore(self):\n self.diem = Diem.Diem(self, f'Scores: {self.scores}'.format())\n\n def taoquaivat(self):\n if self.scores < 200:\n soda = 4\n self.speedqv = 3\n elif self.scores >= 200 and self.scores < 400:\n soda = 5\n self.speedqv = 4\n elif self.scores >= 400 and self.scores < 700:\n soda = 6\n self.speedqv = 5\n elif self.scores >= 700 and self.scores < 1000:\n soda = 10\n self.speedqv = 7\n elif self.scores >= 1000 and self.scores < 1500:\n soda = 15\n self.speedqv = 9\n elif self.scores >= 1500:\n soda = 20\n self.speedqv = 13\n for i in range(soda):\n quaivat = QuaiVat.QuaiVat(self)\n quaivat.rect.x = random.randint(0, 1000)\n quaivat.rect.y = 0\n self.quaivat.add(quaivat)\n\n def capnhatdauthan(self, sodau):\n for i in range(sodau):\n dauthan = DauThan.DauThan(self)\n dauthan.rect.x = i*dauthan.rect.width\n self.dauthan.add(dauthan)\n\n def capnhatitemnangcap(self):\n rd = random.randint(0, 20)\n if rd == 1:\n itemnangcap = ItemNangCap.ItemNangCap(self)\n itemnangcap.rect.x = random.randint(0, 1000)\n itemnangcap.rect.y = 0\n self.itemnangcap.add(itemnangcap)\n\n def capnhatitemdragonball(self):\n rd = random.randint(0, 20)\n if rd == 1:\n itemdragonball = ItemDragonBall.ItemDragonBall(self)\n itemdragonball.rect.x = random.randint(0, 1000)\n itemdragonball.rect.y = 0\n self.itemdragonball.add(itemdragonball)\n\n def main(self):\n while True:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n self.phithuyen.mousegoku(mouse_x, mouse_y)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n self.phithuyen.quaphai = True\n if event.key == pygame.K_LEFT:\n self.phithuyen.quatrai = True\n if event.key == pygame.K_UP:\n self.phithuyen.lentren = True\n if event.key == pygame.K_DOWN:\n self.phithuyen.xuongduoi = True\n elif event.key == pygame.K_SPACE:\n if self.dangchoi:\n for i in range(self.phithuyen.sotiadan):\n if i % 2 == 0:\n chanle = -1\n else:\n chanle = 1\n tmp = Dan.Dan(self)\n if i < 3:\n tmp.rect.x += ((i+1)//2)*(tmp.rect.width)*(chanle)\n else:\n tmp.rect.x += ((i + 1) // 2) * (tmp.rect.width) * (chanle) * 2\n self.dan.add(tmp)\n shootWeapon = pygame.mixer.Sound(\"D:\\BanPhiThuyen\\PicAndMusic\\musiclaze.mp3\")\n shootWeapon.set_volume(0.03)\n shootWeapon.play()\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT:\n self.phithuyen.quaphai = False\n if event.key == pygame.K_LEFT:\n self.phithuyen.quatrai = False\n if event.key == pygame.K_UP:\n self.phithuyen.lentren = False\n if event.key == pygame.K_DOWN:\n self.phithuyen.xuongduoi = False\n elif event.type == pygame.MOUSEBUTTONUP:\n pos = pygame.mouse.get_pos()\n if self.nutplay.rect.collidepoint(pos) and not self.dangchoi:\n self.dangchoi = True\n self.phithuyen.sophithuyen = 3\n self.scores = 0\n self.capnhatscore()\n pygame.mixer.music.load(\"D:\\BanPhiThuyen\\PicAndMusic\\musicnen1.mp3\")\n pygame.mixer.music.play(loops=-1)\n pygame.mixer.music.set_volume(0.1)\n if self.nutquit.rect.collidepoint(pos) and not self.dangchoi:\n sys.exit()\n #if self.nutsetting.rect.collidepoint(pos) and not self.dangchoi:\n #self.dangsetting = True\n 
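# The commented-out block below bounced the quaivat sprites off the side\n # walls; the same edge check is still applied to the item sprites further down.\n 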
#for quaivat in self.quaivat.sprites():\n            # if quaivat.rect.right >= self.manhinh.get_rect().right:\n            #     quaivat.quatrai = True\n            #     quaivat.quaphai = False\n            # if quaivat.rect.left <= 0:\n            #     quaivat.quatrai = False\n            #     quaivat.quaphai = True\n\n            #Handle items bouncing off the screen edges\n            for itemnangcap in self.itemnangcap.sprites():\n                if itemnangcap.rect.right >= self.manhinh.get_rect().right:\n                    itemnangcap.quatrai = True\n                    itemnangcap.quaphai = False\n                if itemnangcap.rect.left <= 0:\n                    itemnangcap.quatrai = False\n                    itemnangcap.quaphai = True\n            for itemdragonball in self.itemdragonball.sprites():\n                if itemdragonball.rect.right >= self.manhinh.get_rect().right:\n                    itemdragonball.quatrai = True\n                    itemdragonball.quaphai = False\n                if itemdragonball.rect.left <= 0:\n                    itemdragonball.quatrai = False\n                    itemdragonball.quaphai = True\n\n            #Update movement of all objects\n            if self.dangchoi:\n                self.phithuyen.capnhat()\n                self.dan.update()\n                # sodem = 0\n                # if self.phithuyen.sotiadan == 5:\n                #     for i in self.dan.sprites():\n                #         sodem += 1\n                #         if sodem > 5:\n                #             sodem = 1\n                #         if sodem % 4 == 0:\n                #             i.rect.x += 2\n                #         if sodem % 5 == 0:\n                #             i.rect.x += -2\n                self.quaivat.update(self.speedqv)\n                self.itemnangcap.update()\n                self.itemdragonball.update()\n\n                #Collision handling\n                # Bullets vs. rocks\n                vacham = pygame.sprite.groupcollide(self.dan, self.quaivat, False, False)\n                if vacham:\n                    for quaivat in self.quaivat.sprites():\n                        quaivat.soda = quaivat.soda - 1\n                        print(f'Rock count: {quaivat.soda}')\n                        if quaivat.soda < 1:\n                            pygame.sprite.groupcollide(self.dan, self.quaivat, True, True)\n                            self.capnhatitemnangcap()\n                            self.capnhatitemdragonball()\n                            for quaivat in vacham.values():\n                                self.scores += 10 * len(quaivat)\n                            self.capnhatscore()\n                            shootBreak = pygame.mixer.Sound(\"D:\BanPhiThuyen\PicAndMusic\musitat.mp3\")\n                            shootBreak.set_volume(0.1)\n                            shootBreak.play()\n                            break\n                        else:\n                            pygame.sprite.groupcollide(self.dan, self.quaivat, True, False)\n                # if pygame.sprite.groupcollide(self.dan, self.quaivat, True, True):\n                #     self.capnhatitemnangcap()\n                #     self.capnhatitemdragonball()\n                #     self.scores += 10\n                #     self.capnhatscore()\n                #     shootBreak = pygame.mixer.Sound(\"D:\BanPhiThuyen\PicAndMusic\musitat.mp3\")\n                #     shootBreak.set_volume(0.1)\n                #     shootBreak.play()\n                #Ship vs. rock collision\n                if pygame.sprite.spritecollideany(self.phithuyen, self.quaivat):\n                    self.quaivat.empty()\n                    self.dan.empty()\n                    self.taoquaivat()\n                    self.phithuyen.rect.midbottom = self.phithuyen.khung_man_hinh.midbottom\n                    self.phithuyen.sophithuyen -= 1\n                    if self.phithuyen.sotiadan > 1:\n                        self.phithuyen.sotiadan -= 1\n                    self.dauthan.empty()\n                    self.capnhatdauthan(self.phithuyen.sophithuyen)\n                    shootLoser = pygame.mixer.Sound(\"D:\BanPhiThuyen\PicAndMusic\musicloser.mp3\")\n                    shootLoser.set_volume(0.2)\n                    shootLoser.play()\n                    time.sleep(2)\n                #Ship vs. health item collision\n                if pygame.sprite.spritecollideany(self.phithuyen, self.itemnangcap):\n                    self.phithuyen.sophithuyen += 1\n                    self.dauthan.empty()\n                    self.capnhatdauthan(self.phithuyen.sophithuyen)\n                    self.itemnangcap.empty()\n                #Ship vs. ammo item collision\n                if pygame.sprite.spritecollideany(self.phithuyen, self.itemdragonball):\n                    if self.phithuyen.sotiadan < 5:\n                        self.phithuyen.sotiadan += 1\n                    self.itemdragonball.empty()\n\n                #Out of lives ends the game\n                if self.phithuyen.sophithuyen == 0:\n                    self.dangchoi = False\n\n                #Remove bullets and monsters that leave the screen\n                for dan in self.dan:\n                    if dan.rect.bottom <= 0:\n                        self.dan.remove(dan)\n                for quaivat in self.quaivat:\n                    if quaivat.rect.top >= 899:\n                        self.quaivat.remove(quaivat)\n\n            
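The collision handling above hinges on the return value of `pygame.sprite.groupcollide(a, b, dokill_a, dokill_b)`: a dict mapping each sprite in `a` to the list of sprites in `b` it overlapped, which is why the score update iterates over `vacham.values()`. A self-contained sketch follows; the `Box` sprite is a hypothetical stand-in, not a class from this game.

```python
import pygame

pygame.init()

class Box(pygame.sprite.Sprite):
    """Tiny stand-in sprite used only to demonstrate groupcollide."""
    def __init__(self, x, y):
        super().__init__()
        self.image = pygame.Surface((10, 10))
        self.rect = self.image.get_rect(topleft=(x, y))

bullets = pygame.sprite.Group(Box(0, 0), Box(100, 100))
rocks = pygame.sprite.Group(Box(5, 5))

# (False, False): nothing is removed from either group; hits maps each
# colliding bullet to the list of rocks it overlapped.
hits = pygame.sprite.groupcollide(bullets, rocks, False, False)
for bullet, hit_rocks in hits.items():
    print(f'bullet at {bullet.rect.topleft} hit {len(hit_rocks)} rock(s)')
```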
#Tạo thêm quái vật nếu không còn quái vật\n if not self.quaivat:\n #self.dan.empty()\n self.taoquaivat()\n\n if self.dangchoi:\n hinhnen = pygame.image.load('D:\\BanPhiThuyen\\PicAndMusic\\galaxy.jpg')\n else:\n hinhnen = pygame.image.load('D:\\BanPhiThuyen\\PicAndMusic\\galaxy1.jpg')\n\n self.manhinh.blit(hinhnen, (0, 0))\n\n #Vẽ các đối tượng lên màn hình\n if self.dangchoi:\n self.phithuyen.ve()\n for dan in self.dan.sprites():\n dan.draw()\n self.quaivat.draw(self.manhinh)\n self.dauthan.draw(self.manhinh)\n self.itemnangcap.draw(self.manhinh)\n self.itemdragonball.draw(self.manhinh)\n self.diem.ve()\n if not self.dangchoi: #not self.dangsetting:\n self.nutplay.ve()\n self.nutquit.ve()\n #self.nutsetting.ve()\n\n pygame.display.flip()","sub_path":"BanPhiThuyen.py","file_name":"BanPhiThuyen.py","file_ext":"py","file_size_in_byte":12737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"528683249","text":"from abc import ABC\n\nfrom .fmi2validation import validate_vc, get_default_initial, get_possible_initial, validate_start_value\nfrom .fmi2types import Fmi2DataTypes, Fmi2Initial, Fmi2Causality, Fmi2Variability\n\n\nclass ScalarVariable(ABC):\n\n __vr_counter = 0\n\n def __init__(self,\n name: str, \n data_type: Fmi2DataTypes,\n initial: Fmi2Initial = None,\n causality=Fmi2Causality.local,\n variability=Fmi2Variability.continuous,\n start = None,\n description: str = \"\",\n value_reference: int = None):\n\n err = validate_vc(\n variability, causality)\n\n if(err is not None):\n raise Exception(\n \"Illegal combination fo variability and causality, FMI2 specification describes the issue with this combination as:\\n\" + err)\n\n initial = initial if initial is not None else get_default_initial(\n variability, causality)\n\n allowed_initial = get_possible_initial(variability, causality)\n\n is_valid_initial = initial in allowed_initial\n\n if(not is_valid_initial):\n raise Exception(\n \"Illegal combination of variabilty causality, see FMI2 spec p.49 for legal combinations\")\n\n\n is_valid_start = validate_start_value(variability,causality,initial,start)\n\n if(is_valid_start != None):\n raise Exception(\"Illegal start value\\n\")\n\n self.causality = causality\n self.data_type = data_type\n self.description = description\n self.initial = initial\n self.name = name\n self.variability = variability\n self.start = start\n self.value_reference = value_reference\n \n\n def is_real(self) -> bool:\n return self.data_type == Fmi2DataTypes.real\n\n def is_integer(self) -> bool:\n return self.data_type == Fmi2DataTypes.integer\n\n def is_boolean(self) -> bool:\n return self.data_type == Fmi2DataTypes.boolean\n\n def is_string(self) -> bool:\n return self.data_type == Fmi2DataTypes.string","sub_path":"tests/examples/projects/ConstantSignalGenerator/resources/pyfmu/fmi2variables.py","file_name":"fmi2variables.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"575318974","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport torch.utils as utils\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport torch.nn.init as init\nfrom tobi_util import ResNet, Bottleneck, ResNet_5, ResNet_5_2\nfrom config import resnet_blk as r\n \n################################\n##### tobi model structure 
#####\n################################\ndef weight_init(m):\n '''\n Usage:\n model = Model()\n model.apply(weight_init)\n '''\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n\n\n\n\n\ndef res_5():\n return ResNet_5(Bottleneck, [3, 4, 6, 3])\n\ndef res_5_2():\n return ResNet_5_2(Bottleneck, [3, 4, 6, 3])\n\ndef ResNet50():\n return ResNet(Bottleneck, [3, 4, 6, 3])\n\nclass Tobi_model(nn.Module):\n def __init__(self):\n super(Tobi_model, self).__init__()\n self.Res = ResNet50()\n self.Res_5 = res_5()\n self.Res_5_2 = res_5_2()\n self.ELU = nn.ELU()\n self.rnn = nn.LSTM(input_size=r['rnn1']['input_size'], hidden_size=r['rnn1']['hidden_size'], num_layers=r['rnn1']['num_layers'], batch_first=r['rnn1']['batch_first'])\n self.rnn_2 = nn.LSTM(input_size=r['rnn2']['input_size'], hidden_size=r['rnn2']['hidden_size'], num_layers=r['rnn2']['num_layers'], batch_first=r['rnn2']['batch_first'])\n self.fc1 = nn.Linear(r['fc1']['input'], r['fc1']['output']) # rcnn1\n self.fc2 = nn.Linear(r['fc2']['input'], r['fc2']['output']) # rcnn2\n self.fc3 = nn.Linear(r['fc3']['input'], r['fc3']['output']) # total\n \n #####\n self.test1 = nn.Linear(r['test1']['input'],r['test1']['output'])\n self.test2 = nn.Linear(r['test2']['input'],r['test2']['output'])\n #####\n \n # self.final_1 = nn.Linear(r['fc4']['input'], r['fc4']['output']) # translation\n # self.final_2 = nn.Linear(r['fc5']['input'], r['fc5']['output']) # quaternion\n self.final_1 = nn.Linear(1024, 3) # translation\n self.final_2 = nn.Linear(1024, 4) # quaternion\n\n self.norm1 = nn.BatchNorm2d(3)\n self.norm2 = nn.BatchNorm2d(3)\n\n for m in self.modules():\n weight_init(m)\n\n\n def forward(self, x):\n\n x_1 = x[0,:].unsqueeze(0) # t-1 step\n x_2 = x[1,:].unsqueeze(0) # t step\n x_1 = self.norm1(x_1)\n 
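For reference, `weight_init` above is meant to be applied recursively with `Module.apply`, as its docstring notes. A quick self-contained check, assuming `weight_init` from this file is in scope (the toy module is hypothetical), that the `BatchNorm2d` branch really zeroes biases and draws weights near one:

```python
import torch.nn as nn

# Hypothetical toy module; assumes weight_init from this file is importable.
toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
toy.apply(weight_init)  # .apply() visits every submodule recursively

bn = toy[1]
assert float(bn.bias.abs().max()) == 0.0         # init.constant_(m.bias.data, 0)
assert abs(float(bn.weight.mean()) - 1.0) < 0.1  # init.normal_(mean=1, std=0.02)
```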
x_2 = self.norm2(x_2)\n        #########################\n        ###Residual block pass###\n        ######################### \n        x_1 = self.Res(x_1) \n        x_2 = self.Res(x_2)\n\n        ###########\n        ###RCNN1###\n        ###########\n        x_3 = torch.cat([x_1,x_2],dim = 1)\n        x_3 = self.Res_5(x_3)\n        x_3, _ = self.rnn(x_3)\n        x_3 = self.fc1(x_3)\n        x_3 = self.ELU(x_3)\n\n        ###########\n        ###RCNN2###\n        ###########\n        x_2 = self.Res_5_2(x_2)\n        x_2, _ = self.rnn_2(x_2)\n        x_2 = self.fc2(x_2)\n        x_2 = self.ELU(x_2)\n\n        ##############\n        ###FC Layer###\n        ##############\n        x_2 = x_2.view(1,r['fc1']['output'])\n        x_3 = x_3.view(1,r['fc2']['output'])\n        x_3 = torch.cat([x_2,x_3],dim = 1)\n        x_3 = self.fc3(x_3)\n        x_3 = self.ELU(x_3)\n\n\n        ####test####\n        x_4 = self.test1(x_3)\n        x_4 = self.ELU(x_4)\n        x_5 = self.test2(x_3)\n        x_5 = self.ELU(x_5)\n\n        x_4 = self.final_1(x_4)\n        x_5 = self.final_2(x_5)\n\n        out = torch.cat([x_4,x_5],dim=1)\n        return out\n\n        # #Translation\n        # print(x_3.size())\n\n        # x_4 = self.final_1(x_3)\n\n        # #Quanternion\n        # x_5 = self.final_2(x_3)\n        # out = torch.cat([x_4,x_5], dim = 1)\n        # return out\n\n# def forward(self, x)\n# NOTE: this helper was hoisted out of Tobi_model (see the commented-out\n# self.forward calls below), so the model instance must be passed in explicitly.\ndef get_loss(model, x, y):\n# def get_loss(self, x, y):\n    # with torch.no_grad():\n    #     out_1 = self.forward(torch.cat([x[0],x[1]],dim = 0))\n    #     out_2 = self.forward(torch.cat([x[1],x[2]],dim = 0))\n    #     out_3 = self.forward(torch.cat([x[2],x[3]],dim = 0))\n    #     out_4 = self.forward(torch.cat([x[3],x[4]],dim = 0))\n    #     out_5 = self.forward(torch.cat([x[4],x[5]],dim = 0))\n    \n    out_1 = model(torch.cat([x[0],x[1]],dim = 0))\n    out_2 = model(torch.cat([x[1],x[2]],dim = 0))\n    out_3 = model(torch.cat([x[2],x[3]],dim = 0))\n    out_4 = model(torch.cat([x[3],x[4]],dim = 0))\n    out_5 = model(torch.cat([x[4],x[5]],dim = 0))\n\n    out_con = torch.cat([out_1,out_2,out_3,out_4,out_5], dim = 0)\n    loss = my_loss(out_con, y)\n    return loss\n    # out_1 = self.forward(torch.cat([x[0],x[1]],dim = 0))\n    # out_2 = self.forward(torch.cat([x[1],x[2]],dim = 0))\n    # out_3 = self.forward(torch.cat([x[2],x[3]],dim = 0))\n    # out_4 = self.forward(torch.cat([x[3],x[4]],dim = 0))\n    # out_5 = self.forward(torch.cat([x[4],x[5]],dim = 0))\n\n    # out_con = torch.cat([out_1,out_2,out_3,out_4,out_5], dim = 0)\n    # loss = self.my_loss(out_con, y)\n    # return loss\n\ndef my_loss(out,tar):\n    loss = torch.zeros(1)\n    # loss += pose_loss(out[0],out[1],tar[0],tar[1])\n    # loss += pose_loss(out[1],out[2],tar[1],tar[2])\n    # loss += pose_loss(out[2],out[3],tar[2],tar[3])\n    # loss += pose_loss(out[3],out[4],tar[3],tar[4])\n    # loss += pose_loss(out[0],out[2],tar[0],tar[2])\n    # loss += pose_loss(out[2],out[4],tar[2],tar[4])\n    # loss += pose_loss(out[0],out[4],tar[0],tar[4]) \n    # print(\"now_loss0 : \",now_loss(out[0],tar[0]))\n    # print(\"now_loss1 : \",now_loss(out[1],tar[1]))\n    # print(\"now_loss2 : \",now_loss(out[2],tar[2]))\n    # print(\"now_loss3 : \",now_loss(out[3],tar[3]))\n    # print(\"now_loss4 : \",now_loss(out[4],tar[4]))\n    loss += now_loss(out[0],tar[0])\n    loss += now_loss(out[1],tar[1])\n    loss += now_loss(out[2],tar[2])\n    loss += now_loss(out[3],tar[3])\n    loss += now_loss(out[4],tar[4])\n    loss = loss*100\n    return loss\n    # loss += self.pose_loss(out[0],out[1],tar[0],tar[1])\n    # loss += self.pose_loss(out[1],out[2],tar[1],tar[2])\n    # loss += self.pose_loss(out[2],out[3],tar[2],tar[3])\n    # loss += self.pose_loss(out[3],out[4],tar[3],tar[4])\n    # loss += self.pose_loss(out[0],out[2],tar[0],tar[2])\n    # loss += self.pose_loss(out[2],out[4],tar[2],tar[4])\n    # loss += self.pose_loss(out[0],out[4],tar[0],tar[4]) \n    # loss += self.now_loss(out[0],tar[0])\n    # loss += 
self.now_loss(out[1],tar[1])\n # loss += self.now_loss(out[2],tar[2])\n # loss += self.now_loss(out[3],tar[3])\n # loss += self.now_loss(out[4],tar[4])\n # return loss\n\ndef pose_loss(output_1,output_2, target_1, target_2):\n P = torch.dot(output_1, output_2)\n P_truth = torch.dot(target_1, target_2)\n loss = (P - P_truth)**2\n return loss \n\ndef now_loss(output_1,target_1):\n return torch.mean((output_1 - target_1) ** 2)","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"442024749","text":"\"\"\"\nplot coverage\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport csv\nimport os\nimport re\nimport shlex\nimport sys\nimport glob\nimport os\n\ntry:\n import matplotlib\n import itertools\n #import cbook as cbook\n import matplotlib.pyplot as plt\n import seaborn as sns\n import numpy as np\n import pandas as pd\nexcept ImportError as e:\n print('[!] The required Python libraries could not be imported:', file=sys.stderr)\n print('\\t{0}'.format(e))\n sys.exit(1)\n\ndef parse_list(fnames, sel_columns = 'all'):\n for fname in fnames:\n data = parse_item(fname, sel_columns=sel_columns)\n\n return data\n\ndef parse_item(fname, sel_columns='all'):\n \"\"\"Remove all files from data folder\"\"\"\n loc = glob.glob('data/*')\n for f in loc:\n print(f)\n os.remove(f)\n\n \"\"\"read xlsx file and convert to dataframe\"\"\"\n\n excel_file = fname\n all_sheets = pd.read_excel(excel_file, sheet_name = None)\n sheets = all_sheets.keys()\n\n for sheet_name in sheets:\n sheet = pd.read_excel(excel_file, sheet_name = sheet_name)\n sheet['Experiment'] = sheet_name\n sheet.to_csv(\"data/%s.csv\" % sheet_name, index = False)\n\n all_files = glob.glob(os.path.join(\"data\", \"*.csv\"))\n df_from_each_file = (pd.read_csv(f, sep = ',', ) for f in all_files)\n df_merged = pd.concat(df_from_each_file, ignore_index = True)\n #Add Gen marker en true case columns\n df_merged['Gen marker'] = df_merged['Reference sequence'].str.split('_').str[-1]\n df_merged['True case'] = df_merged['Experiment'].str.split('_').str[0]\n #save file when needed\n df_merged.to_csv(\"data/merged.csv\")\n #\n\n list_agg = [ 'mean', 'min', 'max', 'std']\n data = df_merged.groupby( ['Gen marker','Experiment', 'True case'], as_index = False).agg({'Average coverage': list_agg})\n data.columns = data.columns.droplevel(level =0)\n data.columns = ['Gen marker', 'Experiment', 'True case', 'mean', 'min', 'max', 'std']\n\n\n return data\n\ndef plot_data_v1(gen_marker_group, dist_exp, data, outfile):\n f = plt.figure()\n ax = plt.gca()\n\n #gen_marker_group = ['ITS', '28S']\n print(gen_marker_group)\n\n #create marker figures\n markerP = itertools.cycle((',', '+', '.', 'o', '*', 'd', 'p', 'h', 'v', 'x'))\n markerN = itertools.cycle((',', '+', '.', 'o', '*', 'd', 'p', 'h', 'v', 'x'))\n\n #make subplot\n for exp in dist_exp:\n msft = data[(data['Experiment'] == exp)]\n msft = msft[(msft['Gen marker'].isin(gen_marker_group))]\n #print(msft)\n if msft['True case'].iloc[0] == 'P':\n color = 'g'\n else:\n color = 'r'\n #plot per experiment\n sns.stripplot(msft['Gen marker'], msft['mean'], color = color, label = exp,\n marker = next(markerN), s = 11, jitter = 0.05)\n\n # Formatting Labels & Appearance\n ax.set_xlabel(\"Gen marker\")\n ax.set_ylabel(\"Average coverage\")\n #ax.set_yscale('log')\n ax.grid('on')\n\n try:\n # Shrink current axis by 20%\n box = ax.get_position()\n ax.set_position([box.x0, 
box.y0, box.width * 0.92, box.height])\n #put legegd on the right side\n legend = ax.legend(loc = 'center left', bbox_to_anchor = (1 , 0.5))\n frame = legend.get_frame()\n #frame.set_facecolor(bg_color)\n except AttributeError as e:\n # No legend, likely because no labels\n pass\n\n if outfile:\n plt.savefig(outfile + str(gen_marker_group), dpi=300)\n\n plt.show()\n\n\ndef plot_coverage_version1(data, outfile, list_gen_markers_groups):\n #f, axes = plt.subplots(nrows = 1, ncols = 2)\n\n\n dist_exp = data['Experiment'].unique()\n\n #genereting plots\n for group in list_gen_markers_groups:\n plot_data_v1(group, dist_exp, data, outfile)\n\n\ndef plot_coverage_version2(data, outfile):\n print(data)\n n_series = len(data) - 1\n\n f = plt.figure()\n ax = plt.gca()\n\n #set list of colours for genetic markers\n #color_map = getattr(plt.cm, colormap)\n #dist_gen_marker = data['Gen marker'].unique()\n #print(dist_gen_marker)\n #color_list = color_map(np.linspace(0, 1,dist_gen_marker))\n #print(color_list)\n\n TC_list = ['N', 'P']\n markerP = itertools.cycle((',', '+', '.', 'o', '*', 'd', 'p', 'h', 'v', 'x'))\n markerN = itertools.cycle((',', '+', '.', 'o', '*', 'd', 'p', 'h', 'v', 'x'))\n color = itertools.cycle(('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'))\n\n #make subselection of each Experiment\n dist_gen = data['Gen marker'].unique()\n\n for gen in dist_gen:\n msft = data[(data['Gen marker'] == 'ITS')]#gen)]\n #print(msft)\n if msft['True case'].iloc[0] == 'P':\n ax.scatter(msft['Experiment'], msft['mean'], color = next(color), label = msft['Gen marker'],\n marker = next(markerP))\n else:\n ax.scatter(msft['Experiment'], msft['mean'], color = next(color), label = msft['Gen marker'],\n marker = next(markerN))\n\n # Formatting Labels & Appearance\n ax.set_xlabel(\"Gen marker\")\n ax.set_ylabel(\"Average coverage\")\n ax.set_yscale('log')\n ax.grid('on')\n\n try:\n # Shrink current axis by 20%\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.92, box.height])\n #put legegd on the right side\n legend = ax.legend(loc = 'center left', bbox_to_anchor = (1 , 0.5))\n frame = legend.get_frame()\n #frame.set_facecolor(bg_color)\n except AttributeError as e:\n # No legend, likely because no labels\n pass\n\n if outfile:\n plt.savefig(outfile, dpi=300)\n\n plt.show()\n\n\ndef get_gen_markers_groups(data):\n gen_markers_group_1 = np.array(['18S', '28S', '5S', 'ITS'])\n index_group_1 = np.where(np.isin(data['Gen marker'].unique(), gen_markers_group_1))\n gen_markers_group_2 = np.delete(data['Gen marker'].unique(), index_group_1)\n\n gen_marker_groups_list = [gen_markers_group_1 , gen_markers_group_2 ]\n\n return gen_marker_groups_list\n\n\nif __name__ == '__main__':\n\n import argparse\n from argparse import RawDescriptionHelpFormatter\n\n ap = argparse.ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)\n ap.add_argument('-xls', type=str, help='input file(s)', metavar='input file', nargs='+')\n\n io_group = ap.add_mutually_exclusive_group(required=True)\n io_group.add_argument('-o', '--output', type=str, help='PDF output file')\n\n cmd = ap.parse_args()\n\n print(cmd.xls, cmd.output)\n metadata = parse_list(cmd.xls)\n\n list_gen_markers_groups = get_gen_markers_groups(metadata)\n\n plot_coverage_version1(metadata, cmd.output, list_gen_markers_groups)\n #plot_coverage_version2(metadata, cmd.output)\n\n\n# print(\"Hoi, parsed xvgs\", \"\\n\\n\", data)\n #n_series = len(data[1:])\n #n_elements = sum(list(map(len, data[1:])))\n #print(n_elements)\n #print('[+] 
Read {0} series of data ({1} elements)'.format(n_series, n_elements))\n\n\n\n# plot_data(data, metadata, cmd.xvg_f,\n# window=cmd.window,\n# interactive=cmd.interactive, outfile=cmd.output,\n# colormap=cmd.colormap, bg_color=cmd.background_color)\n","sub_path":"mapping_coverage/arch output/coverage_plot.py","file_name":"coverage_plot.py","file_ext":"py","file_size_in_byte":7155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"346756600","text":"\"\"\"\ndata_utils.py\n\nCollection of functions for dealing with data for plotting.\n\nAuthor: Sam Foreman\nDate: 01/27/2020\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport os\n\nimport arviz as az\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport xarray as xr\nfrom dataclasses import dataclass\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nfrom typing import Dict\nfrom pathlib import Path\n\nimport utils.file_io as io\nfrom utils.file_io import timeit\nfrom lattice.utils import u1_plaq_exact\nfrom utils.attr_dict import AttrDict\n\n# from plotters.plot_utils import get_matching_log_dirs\n# from plotters.plot_observables import get_obs_dict, grid_plot\n\nmpl.style.use('fast')\nsns.set_palette('bright')\nTLS_DEFAULT = mpl.rcParams['xtick.labelsize']\n\n@dataclass\nclass RunParams:\n hmc: bool\n run_dir: str\n eps: float\n beta: float\n run_steps: int\n plaq_weight: float\n charge_weight: float\n num_steps: int\n x_shape: tuple\n input_shape: tuple\n\n def __post__init__(self):\n self.traj_len = self.num_steps * self.eps\n\n\n@dataclass\nclass ChargeData:\n q: tf.Tensor\n dq: tf.Tensor\n params: RunParams\n\n\n# pylint:disable=invalid-name\n\ndef filter_dict(d, cond, key=None):\n if key is not None:\n val = d[key]\n if isinstance(val, dict):\n return {\n k: v for k, v in val.items() if cond\n }\n raise ValueError('If passing a key, d[key] must be a dict.')\n return {\n k: v for k, v in d.items() if cond\n }\n\n\ndef _look(p, s, conds=None):\n print(f'Looking in {p}...')\n matches = [x for x in Path(p).rglob(f'*{s}*')]\n if conds is not None:\n if isinstance(conds, (list, tuple)):\n for cond in conds:\n matches = [x for x in matches if cond(x)]\n else:\n matches = [x for x in matches if cond(x)]\n\n return matches\n\n\ndef _get_dirs(paths, hmc=False):\n def _look(p, s, conds=None):\n print(f'Looking in {p}...')\n matches = [x for x in Path(p).rglob(f'*{s}*')]\n if conds is not None:\n if isinstance(conds, (list, tuple)):\n for cond in conds:\n matches = [x for x in matches if cond(x)]\n else:\n matches = [x for x in matches if cond(x)]\n return matches\n\n dirs = []\n if hmc:\n search_str = 'HMC_L16_b'\n conds = (\n lambda x: 'hmc_logs' in str(x),\n lambda x: 'hmc' in str(x).lower()\n )\n else:\n search_str = 'L16_b2048'\n conds = (\n lambda x: 'GaugeModel_logs' in (str(x)),\n lambda x: 'HMC_' not in str(x),\n lambda x: Path(x).is_dir(),\n )\n\n if isinstance(paths, (list, tuple)):\n for path in paths:\n dirs += _look(path, search_str, conds)\n\n else:\n dirs = _look(paths, search_str, conds)\n\n return dirs\n\n\n\ndef load_from_dir(d, fnames=None):\n if fnames is None:\n fnames = {\n 'dq': 'dq.z',\n 'charges': 'charges.z',\n 'run_params': 'run_params.z'\n }\n\n darr = [x for x in Path(d).iterdir() if x.is_dir()]\n for rd in darr:\n files = {k: sorted(rd.glob(f'*{v}*')) for k, v in fnames.items()}\n data = {k: io.loadz(v) for k, v in files.items()}\n\n return 
data\n\n\ndef load_charge_data(dirs, hmc=False):\n data = {}\n for d in dirs:\n print(f'Looking in dir: {d}...')\n if 'inference_hmc' in str(d):\n print(f'Skipping {str(d)}...')\n continue\n\n dqfile = sorted(d.rglob('dq.z'))\n qfile = sorted(d.rglob('charges.z'))\n rpfile = sorted(d.rglob('run_params.z'))\n num_runs = len(dqfile)\n if num_runs > 0:\n for dqf, qf, rpf in zip(dqfile, qfile, rpfile):\n params = io.loadz(rpf)\n\n if 'xeps' and 'veps' in params.keys():\n xeps = np.array([i.numpy() for i in params['xeps']])\n veps = np.array([i.numpy() for i in params['veps']])\n eps = (np.mean(xeps) + np.mean(veps)) / 2.\n elif 'eps' in params.keys():\n eps = params['eps']\n\n params['eps'] = eps\n params = RunParams(**params)\n qarr = io.loadz(qf)\n dqarr = io.loadz(dqf)\n\n print(\n '...loading data for (beta, num_steps, eps): '\n f'({params.beta}, {params.num_steps}, {params.eps:.3g})'\n )\n\n charge_data = ChargeData(q=qarr, dq=dqarr, params=params)\n try:\n data[params.beta].update({params.traj_len: charge_data})\n except KeyError:\n data[params.beta] = {params.traj_len: charge_data}\n\n # def _update_dict(beta, z, qdata):\n # try:\n # z[beta].update({params.traj_len: qdata})\n # except KeyError:\n # z[beta] = {params.traj_len: qdata}\n #\n # return z\n #\n # data = _update_dict(params.beta, data, charge_data)\n\n return data\n\n\ndef calc_tau_int(data, therm_frac=0.2):\n \"\"\"Calculate the integrated autocorrelation time.\"\"\"\n tau_int = {}\n for key, val in data.items():\n tau_int[key] = {}\n for k, v in val.items():\n arr, _ = therm_arr(v, therm_frac=therm_frac)\n arr = arr.T\n pass\n\n\n# reference:\n# https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md\n# ----------\n# Problem:\n# 100. Compute bootstrapped 95% confidence intervals for the mean of a 1D\n# array X (i.e., resample the elements of an array with replacement N times,\n# compute the mean of each sample, and then compute percentiles over the\n# means).\ndef bootstrapped_confidence_interval(x: np.ndarray, N: int = 1000):\n idx = np.random.randint(0, x.size, (N, x.size))\n means = x[idx].mean(axis=1)\n confint = np.percentile(means, [2.5, 97.5])\n\n return confint\n\n\n# Reference: https://dfm.io/posts/autocorr/\ndef next_pow_two(n):\n i = 1\n while i < n:\n i = i << 1\n\n return i\n\n\ndef autocorr_func_1d(x, norm=True):\n \"\"\"Compute the autocorrelation function of a 1D chain.\"\"\"\n x = np.atleast_1d(x)\n if len(x.shape) != 1:\n raise ValueError('Invalid dimensions for 1D autocorrelation function.')\n\n n = next_pow_two(len(x))\n # Compute the FFT and then (from that) the auto-correlation function\n f = np.fft.fft(x - np.mean(x), n=2*n)\n acf = np.fft.ifft(f * np.conjugate(f))[:len(x)].real\n acf /= 4 * n\n\n # Optionally normalize\n if norm:\n acf /= acf[0]\n\n return acf\n\n\ndef auto_window(taus, c):\n \"\"\"Automated windowing procedure following Sokal (1989).\"\"\"\n m = np.arange(len(taus)) < c * taus\n if np.any(m):\n return np.argmin(m)\n\n return len(taus) - 1\n\n\ndef autocorr_gw2010(y, c=5.0):\n \"\"\"Following the suggestion from Goodman & Weare (2010).\"\"\"\n f = autocorr_func_1d(np.mean(y, axis=0))\n taus = 2.0 * np.cumsum(f) - 1.0\n window = auto_window(taus, c)\n\n return taus[window]\n\n\ndef autocorr_new(y, c=5.0):\n \"\"\"New implementation of autocorrelation function.\"\"\"\n f = np.zeros(y.shape[1])\n for yy in y:\n f += autocorr_func_1d(yy)\n\n f /= len(y)\n taus = 2.0 * np.cumsum(f) - 1.0\n window = auto_window(taus, c)\n\n return taus[window]\n\n\ndef 
calc_autocorr(x):\n    N = np.exp(np.linspace(np.log(100), np.log(x.shape[1]), 20)).astype(int)\n    new = np.empty(len(N))\n    for i, n in enumerate(N):\n        new[i] = autocorr_new(x[:, :n])\n\n    return N, new\n\n\ndef flatten_dict(d):\n    \"\"\"Recursively convert all entries of `d` to be `AttrDict`.\"\"\"\n    if not isinstance(d, AttrDict):\n        d = AttrDict(**d)\n\n    for key, val in d.items():\n        if isinstance(val, dict):\n            if not isinstance(val, AttrDict):\n                d[key] = flatten_dict(val)\n            else:\n                d[key] = AttrDict(**val)\n\n    return d\n\n\ndef _load_inference_data(log_dir, fnames, inference_str='inference'):\n    \"\"\"Helper function for loading inference data from `log_dir`.\"\"\"\n    run_dir = os.path.join(log_dir, inference_str)\n    if os.path.isdir(run_dir):\n        data_dir = os.path.join(run_dir, 'run_data')\n        rp_file = os.path.join(run_dir, 'run_params.z')\n        if os.path.isfile(rp_file) and os.path.isdir(data_dir):\n            run_params = io.loadz(rp_file)\n            key = (run_params['beta'],\n                   run_params['eps'],\n                   run_params['num_steps'])\n            data = [\n                io.loadz(os.path.join(data_dir, f'{fname}.z'))\n                for fname in fnames\n            ]\n\n            return key, data\n\n\ndef load_inference_data(dirs, search_strs, inference_str='inference'):\n    data = {\n        s: {} for s in search_strs\n    }\n\n    for d in dirs:\n        print(f'Looking in dir: {d}...')\n        run_dir = Path(os.path.join(d, inference_str))\n        if run_dir.is_dir():\n            run_dirs = [x for x in run_dir.iterdir() if x.is_dir()]\n            for rd in run_dirs:\n                print(f'...looking in run_dir: {rd}...')\n                rp_file = os.path.join(str(rd), 'run_params.z')\n                if os.path.isfile(rp_file):\n                    params = io.loadz(rp_file)\n                    beta = params['beta']\n                    eps = params['eps']\n                    num_steps = params['num_steps']\n                    data_dir = os.path.join(str(rd), 'run_data')\n                    if os.path.isdir(data_dir):\n                        for search_str in search_strs:\n                            dfile = os.path.join(data_dir, f'{search_str}.z')\n                            if os.path.isfile(dfile):\n                                _data = io.loadz(dfile)\n                                try:\n                                    data[search_str].update({\n                                        (beta, num_steps, eps): _data\n                                    })\n                                except KeyError:\n                                    data[search_str] = {\n                                        (beta, num_steps, eps): _data\n                                    }\n\n    return data\n\n\ndef _get_l2hmc_dirs(paths, search_str=None):\n    \"\"\"Look for `log_dirs` containing a training/inference run for L2HMC.\"\"\"\n    if search_str is None:\n        search_str = '*L16_b*'\n\n    dirs = []\n    for path in paths:\n        if not isinstance(path, Path):\n            path = Path(os.path.abspath(path))\n\n        print(f'Looking in {path}...')\n        dirs += [\n            x for x in path.rglob(search_str)\n            if 'GaugeModel_logs' in str(x)\n            and 'HMC_' not in str(x)\n            and x.is_dir()\n        ]\n\n    return dirs\n\n\ndef get_l2hmc_dirs():\n    bd_local = os.path.abspath(\n        '/Users/saforem2/thetaGPU/training'\n    )\n    bd_theta = os.path.abspath(\n        '/lus/theta-fs0/projects/DLHMC/thetaGPU/training'\n    )\n\n    l2hmc_dirs = []\n    if os.path.isdir(bd_local):\n        l2hmc_dirs += _get_l2hmc_dirs(bd_local)\n\n    if os.path.isdir(bd_theta):\n        l2hmc_dirs += _get_l2hmc_dirs(bd_theta)\n\n    return l2hmc_dirs\n\n\ndef get_hmc_dirs(base_dir=None):\n    if base_dir is None:\n        base_dir = os.path.abspath(\n            '/lus/theta-fs0/projects/DLHMC/thetaGPU/inference/'\n        )\n        if not os.path.isdir(base_dir):\n            base_dir = os.path.abspath(\n                '/Users/saforem2/thetaGPU/inference'\n            )\n        if not os.path.isdir(base_dir):\n            raise FileNotFoundError(f'Unable to locate {base_dir}')\n    base_dir = Path(base_dir)\n    hmc_dirs = [x for x in base_dir.rglob('*HMC_L16*') if x.is_dir()]\n\n    return hmc_dirs\n\n\ndef bootstrap(x, reps=10000):\n    n = len(x)\n    xb = np.random.choice(x, (n, reps), replace=True)\n    yb = xb.mean(axis=0)\n    lower, upper = np.percentile(yb, [2.5, 97.5])\n\n    return yb, (lower, upper)\n
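A small usage sketch for the `bootstrap` helper above, on synthetic data (not from this repository). With the percentile unpacking fixed, `(lo, hi)` is an ordered 95% interval for the mean:

```python
import numpy as np

rng = np.random.default_rng(0)
sample = rng.normal(loc=2.0, scale=0.5, size=400)  # true mean = 2.0

yb, (lo, hi) = bootstrap(sample)  # bootstrap() is defined above
# Expect the 95% interval to sit close to the true mean of 2.0.
print(f'bootstrap mean: {yb.mean():.3f}, 95% CI: ({lo:.3f}, {hi:.3f})')
```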
\n\ndef dq_stats(dq, reps=10000, therm_frac=0.2):\n    stats = {}\n    for key, val in dq.items():\n        for k, v in val.items():\n            data = therm_arr(v, therm_frac=therm_frac, ret_steps=False)\n            avgs = []\n            errs = []\n            for chain in data.T:\n                yb, (lower, upper) = bootstrap(chain, reps)\n                avg = yb.mean()  # bootstrap() returns the resampled means, not a scalar\n                err = np.max([np.abs(avg - lower), np.abs(upper - avg)])\n                avgs.append(avg)\n                errs.append(err)\n\n            try:\n                stats[key].update({\n                    k: {\n                        'avg': np.mean(avgs),\n                        'avg_std': np.std(avgs),\n                        'err': np.mean(errs),\n                        'err_std': np.std(errs),\n                        'min': np.min(data),\n                        'max': np.max(data),\n                    }\n                })\n            except KeyError:\n                stats[key] = {\n                    k: {\n                        'avg': np.mean(avgs),\n                        'avg_std': np.std(avgs),\n                        'err': np.mean(errs),\n                        'err_std': np.std(errs),\n                        'min': np.min(data),\n                        'max': np.max(data),\n                    },\n                }\n\n    return stats\n\n\n\ndef autocorrelation_time(x, s, mu, var):\n    \"\"\"Compute the autocorrelation time.\"\"\"\n    b, t, d = x.shape\n    act_ = np.zeros([d])\n    for i in range(b):\n        y = x[i] - mu\n        p, n = y[:-s], y[s:]\n        act_ += np.mean(p * n, axis=0) / var\n    act_ /= b\n\n    return act_\n\n\ndef effective_sample_size(x, mu, var):\n    # batch_size, time, dimension\n    b, t, d = x.shape\n    ess_ = np.ones([d])\n    for s in range(1, t):\n        p = autocorrelation_time(x, s, mu, var)\n        if np.sum(p > 0.05) == 0:\n            break\n        else:\n            for j in range(d):\n                if p[j] > 0.05:\n                    ess_[j] += 2. * p[j] * (1. - float(s) / t)\n    return t / ess_\n\n\ndef batch_means_ess(x):\n    \"\"\"Estimate the ESS.\n\n    We estimate the ESS as the ratio of the variance of the batch means to the\n    variance of the chain, [ref](https://arxiv.org/pdf/1011.0175.pdf).\n\n    NOTE: `x` should be a chain with shape: [time_steps, num_chains, dim].\n    \"\"\"\n    x = np.transpose(x, [1, 0, 2])\n    T, M, D = x.shape\n    num_batches = int(np.floor(T ** (1 / 3)))\n    batch_size = int(np.floor(num_batches ** 2))\n    batch_means = []\n    for i in range(num_batches):\n        batch = x[batch_size * i:batch_size * i + batch_size]\n        batch_means.append(np.mean(batch, axis=0))\n    batch_variance = np.var(np.array(batch_means), axis=0)\n    chain_variance = np.var(x, axis=0)\n    act = batch_size * batch_variance / (chain_variance + 1e-20)\n\n    return 1. 
/ act\n\n\ndef _calc_var_explained(x):\n \"\"\"Calculate the % variance explained by the singular values of `x`.\"\"\"\n _, s, _ = np.linalg.svd(x, full_matrices=True)\n return s ** 2 / np.sum(s ** 2)\n\n\ndef calc_var_explained(weights_dict):\n \"\"\"Calculate the % variance explained by the sv's for each weight mtx.\"\"\"\n xweights = weights_dict['xnet']\n vweights = weights_dict['vnet']\n var_explained = {}\n for ((xk, xv), (vk, vv)) in zip(xweights.items(), vweights.items()):\n xk_ = f'xnet_{xk}'\n vk_ = f'vnet_{vk}'\n var_explained[xk_] = _calc_var_explained(xv)\n var_explained[vk_] = _calc_var_explained(vv)\n\n return var_explained\n\n\ndef bootstrap_old(data, n_boot=10000, ci=68):\n \"\"\"Bootstrap resampling.\n\n Returns:\n mean (float): Mean of the (bootstrap) resampled data.\n err (float): Standard deviation of the (bootstrap) resampled data.\n data_rs (np.ndarray): Boostrap resampled data.\n \"\"\"\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n\n step_axis = np.argmax(data.shape)\n\n samples = []\n for _ in range(int(n_boot)):\n resampler = np.random.randint(0,\n data.shape[step_axis],\n data.shape[step_axis])\n sample = data.take(resampler, axis=step_axis)\n samples.append(np.mean(sample, axis=step_axis))\n\n data_rs = np.array(samples)\n mean = np.mean(data_rs)\n err = np.std(data_rs)\n\n return mean, err, data_rs\n\n\ndef calc_tunneling_rate(charges):\n \"\"\"Calculate the tunneling rate as Q_{i+1} - Q_{i}.\"\"\"\n if not isinstance(charges, np.ndarray):\n charges = np.array(charges)\n\n charges = charges.T if charges.shape[0] > charges.shape[1] else charges\n charges = np.around(charges)\n # dq = np.abs(charges[:, 1:] - charges[:, :-1])\n dq = np.abs(charges[1:] - charges[:-1])\n tunneling_rate = np.mean(dq, axis=1)\n\n return dq, tunneling_rate\n\n\ndef therm_arr(arr, therm_frac=0.2, ret_steps=True):\n \"\"\"Drop first `therm_frac` steps of `arr` to account for thermalization.\"\"\"\n # step_axis = np.argmax(arr.shape)\n step_axis = 0\n num_steps = arr.shape[step_axis]\n therm_steps = int(therm_frac * num_steps)\n arr = np.delete(arr, np.s_[:therm_steps], axis=step_axis)\n steps = np.arange(therm_steps, num_steps)\n\n if ret_steps:\n return arr, steps\n\n return arr\n\n\n# pylint:disable = invalid-name\nclass InferenceData:\n \"\"\"InferenceData object.\"\"\"\n\n def __init__(self, params, run_params, run_data, energy_data):\n self._params = params\n self._run_params = run_params\n\n self._run_data = run_data\n self._energy_data = self._sort_energy_data(energy_data)\n\n self._log_dir = params.get('log_dir', None)\n self._params = io.loadz(os.path.join(self._log_dir,\n 'parameters.pkl'))\n self._train_weights = (\n self._params['x_scale_weight'],\n self._params['x_translation_weight'],\n self._params['x_transformation_weight'],\n self._params['v_scale_weight'],\n self._params['v_translation_weight'],\n self._params['v_transformation_weight'],\n )\n _tws_title = ', '.join((str(i) for i in self._train_weights))\n self._tws_title = f'({_tws_title})'\n self._tws_fname = ''.join((io._strf(i) for i in self._train_weights))\n\n @staticmethod\n def _sort_energy_data(energy_data):\n ekeys = ['potential_init', 'potential_proposed', 'potential_out',\n 'kinetic_init', 'kinetic_proposed', 'kinetic_out',\n 'hamiltonian_init', 'hamiltonian_proposed', 'hamiltonian_out']\n energy_data = {k: energy_data[k] for k in ekeys}\n return energy_data\n\n @staticmethod\n def build_energy_dataset(energy_data):\n \"\"\"Build energy dataset.\"\"\"\n ed_dict = {}\n for key, val in 
energy_data.items():\n arr, steps = therm_arr(np.array(val))\n arr = arr.T\n chains = np.arange(arr.shape[0])\n ed_dict[key] = xr.DataArray(arr,\n dims=['chain', 'draw'],\n coords=[chains, steps])\n dataset = xr.Dataset(ed_dict)\n\n return dataset\n\n @staticmethod\n def build_dataset(run_data, run_params):\n \"\"\"Build dataset from `run_data.\"\"\"\n rd_dict = {}\n for key, val in run_data.items():\n if 'mask' in key:\n continue\n\n x, draws = therm_arr(np.array(val))\n x = x.T\n\n if 'plaqs' in key:\n key = 'plaqs_diffs'\n x = u1_plaq_exact(run_params['beta']) - x\n\n if 'charges' in key:\n x = np.around(x)\n\n chains = x.shape[0]\n rd_dict[key] = xr.DataArray(x,\n dims=['chain', 'draw'],\n coords=[chains, draws])\n\n rd_dict['charges_squared'] = rd_dict['charges'] ** 2\n\n charges = rd_dict['charges'].values.T\n _, dq = calc_tunneling_rate(charges)\n dq = dq.T\n\n chains = np.arange(dq.shape[0])\n rd_dict['tunneling_rate'] = xr.DataArray(dq,\n dims=['chain', 'draw'],\n coords=[chains, draws])\n\n dataset = xr.Dataset(rd_dict)\n\n return dataset\n\n def _plot_setup(self, run_params, idx=None, nw_run=True):\n \"\"\"Setup for creating plots.\n\n Returns:\n fname (str): String containing the filename containing info about\n data.\n title_str (str): Title string to set as title of figure.\n \"\"\"\n eps = run_params['eps']\n beta = run_params['beta']\n net_weights = run_params['net_weights']\n\n nw_str = ''.join((io.strf(i).replace('.', '') for i in net_weights))\n nws = '(' + ', '.join((str(i) for i in net_weights)) + ')'\n\n lf = self._params['num_steps']\n fname = f'lf{lf}'\n run_steps = run_params['run_steps']\n fname += f'_steps{run_steps}'\n title_str = (r\"$N_{\\mathrm{LF}} = $\" + f'{lf}, '\n r\"$\\beta = $\" + f'{beta:.1g}, '\n r\"$\\varepsilon = $\" + f'{eps:.3g}')\n eps_str = f'{eps:4g}'.replace('.', '')\n fname += f'_e{eps_str}'\n\n if self._params.get('eps_fixed', False):\n title_str += ' (fixed)'\n fname += '_fixed'\n\n if any([tw == 0 for tw in self._train_weights]):\n title_str += (', '\n + r\"$\\mathrm{nw}_{\\mathrm{train}} = $\"\n + f' {self._tws_title}')\n fname += f'_train{self._tws_fname}'\n\n clip_value = self._params.get('clip_value', 0)\n if clip_value > 0:\n title_str += f', clip: {clip_value}'\n fname += f'_clip{clip_value}'.replace('.', '')\n\n if nw_run:\n title_str += ', ' + r\"$\\mathrm{nw}_{\\mathrm{run}}=$\" + f' {nws}'\n fname += f'_{nw_str}'\n # fname += f'_{net_weights_str}'\n\n if idx is not None:\n fname += f'_{idx}'\n\n return fname, title_str\n\n @staticmethod\n def _savefig(fig, out_file):\n \"\"\"Save `fig` to `out_file`.\"\"\"\n io.log(f'Saving figure to: {out_file}.')\n fig.savefig(out_file, dpi=200, bbox_inches='tight')\n\n def _plot_posterior(self, data, out_file, title_str,\n var_names=None, out_file1=None):\n \"\"\"Plot posterior using `arviz.plot_posterior`.\"\"\"\n _ = az.plot_posterior(data, var_names=var_names)\n fig = plt.gcf()\n fig.suptitle(title_str, fontsize=14, y=1.05)\n self._savefig(fig, out_file)\n if out_file1 is not None:\n self._savefig(fig, out_file1)\n\n def _plot_trace(self, data, out_file, title_str,\n var_names=None, out_file1=None):\n _ = az.plot_trace(data,\n compact=True,\n combined=True,\n var_names=var_names)\n fig = plt.gcf()\n fig.suptitle(title_str, fontsize=14, y=1.05)\n self._savefig(fig, out_file)\n if out_file1 is not None:\n self._savefig(fig, out_file)\n\n def make_plots(self,\n run_params,\n run_data=None,\n energy_data=None,\n runs_np=True, out_dir=None):\n \"\"\"Create trace + KDE plots of lattice 
observables and energy data.\"\"\"\n type_str = 'figures_np' if runs_np else 'figures_tf'\n figs_dir = os.path.join(self._log_dir, type_str)\n fig_dir = os.path.join(figs_dir, run_params['run_str'])\n io.check_else_make_dir(fig_dir)\n\n dataset = None\n energy_dataset = None\n try:\n fname, title_str = self._plot_setup(run_params)\n except FileNotFoundError:\n return dataset, energy_dataset\n\n tp_fname = f'{fname}_traceplot'\n pp_fname = f'{fname}_posterior'\n rp_fname = f'{fname}_ridgeplot'\n\n dataset = self.build_dataset(run_data, run_params)\n\n tp_out_file = os.path.join(fig_dir, f'{tp_fname}.pdf')\n pp_out_file = os.path.join(fig_dir, f'{pp_fname}.pdf')\n\n var_names = ['tunneling_rate', 'plaqs_diffs']\n if hasattr(dataset, 'dx'):\n var_names.append('dx')\n var_names.extend(['accept_prob', 'charges_squared', 'charges'])\n\n tp_out_file_ = None\n pp_out_file_ = None\n if out_dir is not None:\n io.check_else_make_dir(out_dir)\n tp_out_file1 = os.path.join(out_dir, f'{tp_fname}.pdf')\n pp_out_file1 = os.path.join(out_dir, f'{pp_fname}.pdf')\n\n ###################################################\n # Create traceplot + posterior plot of observables\n ###################################################\n self._plot_trace(dataset, tp_out_file,\n var_names=var_names,\n out_file1=tp_out_file1)\n\n self._plot_posterior(dataset, pp_out_file,\n var_names=var_names,\n out_file1=pp_out_file1)\n\n # * * * * * * * * * * * * * * * * *\n # Create ridgeplot of plaq diffs *\n # * * * * * * * * * * * * * * * * *\n rp_out_file = os.path.join(fig_dir, f'{rp_fname}.pdf')\n _ = az.plot_forest(dataset,\n kind='ridgeplot',\n var_names=['plaqs_diffs'],\n ridgeplot_alpha=0.4,\n ridgeplot_overlap=0.1,\n combined=False)\n fig = plt.gcf()\n fig.suptitle(title_str, fontsize='x-large', y=1.025)\n self._savefig(fig, rp_out_file)\n if out_dir is not None:\n rp_out_file1 = os.path.join(out_dir, f'{rp_fname}.pdf')\n self._savefig(fig, rp_out_file1)\n\n # * * * * * * * * * * * * * * * * * * * * * * * * * *\n # Create traceplot + posterior plot of energy data *\n # * * * * * * * * * * * * * * * * * * * * * * * * * *\n if energy_data is not None:\n energy_dataset = self.energy_plots(energy_data, run_params,\n fname, out_dir=out_dir)\n\n return dataset, energy_dataset\n\n def energy_plots(self, energy_data, fname, out_dir=None):\n \"\"\"Plot energy data from inference run.\"\"\"\n energy_dataset = self.build_dataset(energy_data, )\n epp_fname = f'{fname}_energy_posterior'\n etp_fname = f'{fname}_energy_traceplot'\n etp_out_file = os.path.join(fig_dir, f'{etp_fname}.pdf')\n epp_out_file = os.path.join(fig_dir, f'{epp_fname}.pdf')\n\n etp_out_file1 = None\n etp_out_file2 = None\n if out_dir is not None:\n etp_out_file1 = os.path.join(out_dir, f'{etp_fname}.pdf')\n epp_out_file1 = os.path.join(out_dir, f'{epp_fname}.pdf')\n\n self._plot_trace(energy_dataset, etp_out_file,\n out_file1=etp_out_file1)\n self._plot_posterior(energy_dataset, epp_out_file,\n out_file1=epp_out_file1)\n\n return energy_dataset\n\n\n# pylint: disable=invalid-name\nclass DataLoader:\n \"\"\"DataLoader object.\"\"\"\n\n def __init__(self,\n log_dir=None,\n n_boot=5000,\n therm_frac=0.25,\n nw_include=None,\n calc_stats=True,\n filter_str=None,\n runs_np=False):\n \"\"\"Initialization method.\"\"\"\n self._log_dir = log_dir\n self._n_boot = n_boot\n self._therm_frac = therm_frac\n self._nw_include = nw_include\n self._calc_stats = calc_stats\n self.run_dirs = io.get_run_dirs(log_dir, filter_str, runs_np)\n self._params = 
io.loadz(os.path.join(self._log_dir,\n 'parameters.pkl'))\n self._train_weights = (\n self._params['x_scale_weight'],\n self._params['x_translation_weight'],\n self._params['x_transformation_weight'],\n self._params['v_scale_weight'],\n self._params['v_translation_weight'],\n self._params['v_transformation_weight'],\n )\n _tws_title = ', '.join((str(i) for i in self._train_weights))\n self._tws_title = f'({_tws_title})'\n self._tws_fname = ''.join((io.strf(i) for i in self._train_weights))\n\n def _load_sqz(self, fname):\n data = io.loadz(os.path.join(self._obs_dir, fname))\n return np.squeeze(np.array(data))\n\n def _get_dx(self, fname):\n dx_file = os.path.join(self._obs_dir, fname)\n if os.path.isfile(dx_file):\n dx = self._load_sqz(fname)\n\n def _stats(self, arr, axis=0):\n \"\"\"Calculate statistics using `bootstrap` resampling along `axis`.\"\"\"\n # _, _, arr = bootstrap(arr, n_boot=self._n_boot)\n arr, _ = bootstrap(arr, n_boot=self._n_boot)\n return arr.mean(axis=axis).flatten(), arr.std(axis=axis).flatten()\n\n def _get_observables_bs(self, data, run_params):\n data_bs = {}\n for key, val in data.items():\n if val is None:\n continue\n avg, err = self._stats(val, axis=0)\n err_key = f'{key}_err'\n data_bs[key] = avg\n data_bs[err_key] = err\n\n entries = len(avg.flatten())\n net_weights = tuple(run_params['net_weights'])\n data_bs['run_dir'] = np.array([run_dir for _ in range(entries)])\n data_bs['net_weights'] = tuple([net_weights for _ in range(entries)])\n data_bs['log_dir'] = np.array([self._log_dir for _ in range(entries)])\n\n return pd.DataFrame(data_bs)\n\n def get_observables(self, run_dir=None):\n \"\"\"Get all observables from inference_data in `run_dir`.\"\"\"\n run_params = io.loadz(os.path.join(run_dir, 'run_params.pkl'))\n beta = run_params['beta']\n net_weights = tuple([int(i) for i in run_params['net_weights']])\n\n keep = True\n if self._nw_include is not None:\n keep = net_weights in self._nw_include\n\n # If none (< 10 %) of the proposed configs are rejected,\n # don't bother loading data and calculating statistics.\n px = self._load_sqz('px.pkl')\n avg_px = np.mean(px)\n if avg_px < 0.1 or not keep:\n io.log(f'Skipping! 
nw: {net_weights}, avg_px: {avg_px:.3g}')\n return None, run_params\n\n io.log(f'Loading data for net_weights: {net_weights}...')\n io.log(f' run_dir: {run_dir}')\n\n # load chages, plaqs data\n charges = self._load_sqz('charges.pkl')\n plaqs = self._load_sqz('plaqs.pkl')\n dplq = u1_plaq_exact(beta) - plaqs\n\n # thermalize configs\n px, _ = therm_arr(px, self._therm_frac)\n dplq, _ = therm_arr(dplq, self._therm_frac)\n charges, _ = np.insert(charges, 0, 0, axis=0)\n charges, _ = therm_arr(charges)\n dq, _ = calc_tunneling_rate(charges)\n dq = dq.T\n\n dx = self._get_dx('dx.pkl')\n dxf = self.get_dx('dxf.pkl')\n dxb = self._get_dx('dxb.pkl')\n observables = {\n 'plaqs_diffs': dplq,\n 'accept_prob': px,\n 'tunneling_rate': dq,\n }\n _names = ['dx', 'dxf', 'dxb']\n _vals = [dx, dxf, dxb]\n for name, val in zip(_names, _vals):\n if val is not None:\n observables[name] = val\n\n return observables\n\n def _build_dataframes(self, observables, run_params):\n \"\"\"Build dataframes from `observables`.\"\"\"\n data_bs = None\n if self._calc_stats:\n data_bs = self._get_data_bs(observables, run_params)\n\n data = {}\n for key, val in observables.items():\n data[key] = val.flatten()\n\n entries = len(dq.flatten())\n data = {key: val.flatten() for (key, val) in observables.items()}\n data.update({\n 'run_dir': np.array([run_dir for _ in range(entries)]),\n 'net_weights': tuple([net_weights for _ in range(entries)]),\n 'log_dir': np.array([self._log_dir for _ in range(entries)]),\n })\n\n data = pd.DataFrame(data)\n\n return data, data_bs, run_params\n\n def build_dataframes(self, data=None, data_bs=None, **kwargs):\n \"\"\"Build `pd.DataFrames` containing all inference data from `run_dirs`.\n\n Args:\n run_dirs (array-like): List of run_dirs in which to look for\n inference data.\n data (pd.DataFrame): DataFrame containing inference data. If `data\n is not None`, the new `pd.DataFrame` will be appended to\n `data`.\n data_bs (pd.DataFrame): DataFrame containing bootstrapped inference\n data. 
If `data_bs is not None`, the new `pd.DataFrame` will be\n                appended to `data_bs`.\n\n        Kwargs:\n            Passed to `get_observables`.\n\n        Returns:\n            data (pd.DataFrame): DataFrame containing (flattened) inference\n                data.\n            data_bs (pd.DataFrame): DataFrame containing (bootstrapped)\n                inference data.\n            run_params (dict): Dictionary of parameters used to generate\n                inference data.\n        \"\"\"\n        run_params = None\n        for run_dir in self.run_dirs:  # run_dirs gathered in __init__\n            if data is not None and hasattr(data, 'run_dir'):\n                if not data[data.run_dir == run_dir].empty:\n                    continue\n\n            run_params_file = os.path.join(run_dir, 'run_params.pkl')\n            if not os.path.isfile(run_params_file):\n                run_params = None\n                continue\n\n            _data, _data_bs, run_params = self.get_observables(run_dir)\n\n            if data is None:\n                data = _data\n            else:\n                data = pd.concat(\n                    [data, _data], axis=0\n                ).reset_index(drop=True)\n\n            if data_bs is None:\n                data_bs = _data_bs\n            else:\n                data_bs = pd.concat(\n                    [data_bs, _data_bs], axis=0\n                ).reset_index(drop=True)\n\n        return data, data_bs, run_params\n\n    def _plot_setup(self, run_params, idx=None, nw_run=True):\n        \"\"\"Setup for creating plots.\n\n        Returns:\n            fname (str): String containing the filename containing info about\n                data.\n            title_str (str): Title string to set as title of figure.\n        \"\"\"\n        eps = run_params['eps']\n        beta = run_params['beta']\n        net_weights = run_params['net_weights']\n\n        nw_str = ''.join((io.strf(i).replace('.', '') for i in net_weights))\n        nws = '(' + ', '.join((str(i) for i in net_weights)) + ')'\n\n        lf = self._params['num_steps']\n        fname = f'lf{lf}'\n        run_steps = run_params['run_steps']\n        fname += f'_steps{run_steps}'\n        title_str = (r\"$N_{\mathrm{LF}} = $\" + f'{lf}, '\n                     r\"$\beta = $\" + f'{beta:.1g}, '\n                     r\"$\varepsilon = $\" + f'{eps:.3g}')\n        eps_str = f'{eps:4g}'.replace('.', '')\n        fname += f'_e{eps_str}'\n\n        if self._params.get('eps_fixed', False):\n            title_str += ' (fixed)'\n            fname += '_fixed'\n\n        if any([tw == 0 for tw in self._train_weights]):\n            title_str += (', '\n                          + r\"$\mathrm{nw}_{\mathrm{train}} = $\"\n                          + f' {self._tws_title}')\n            fname += f'_train{self._tws_fname}'\n\n        clip_value = self._params.get('clip_value', 0.)\n        if clip_value > 0:\n            title_str += f', clip: {clip_value}'\n            fname += f'_clip{clip_value}'.replace('.', '')\n\n        if nw_run:\n            title_str += ', ' + r\"$\mathrm{nw}_{\mathrm{run}}=$\" + f' {nws}'\n            fname += f'_{nw_str}'\n\n        if idx is not None:\n            fname += f'_{idx}'\n\n        return fname, title_str\n\n    def _savefig(self, fig, out_file):\n        \"\"\"Save `fig` to `out_file`.\"\"\"\n        io.log(f'Saving figure to: {out_file}.')\n        fig.savefig(out_file, dpi=200, bbox_inches='tight')\n\n    def _plot_posterior(self, data, out_file, title_str,\n                        var_names=None, out_file1=None):\n        \"\"\"Plot posterior using `arviz.plot_posterior`.\"\"\"\n        _ = az.plot_posterior(data, var_names=var_names)\n        fig = plt.gcf()\n        fig.suptitle(title_str, fontsize=14, y=1.05)\n        self._savefig(fig, out_file)\n        if out_file1 is not None:\n            self._savefig(fig, out_file1)\n\n    def _plot_trace(self, data, out_file, title_str,\n                    var_names=None, out_file1=None):\n        _ = az.plot_trace(data,\n                          compact=True,\n                          combined=True,\n                          var_names=var_names)\n        fig = plt.gcf()\n        fig.suptitle(title_str, fontsize=14, y=1.05)\n        self._savefig(fig, out_file)\n        if out_file1 is not None:\n            self._savefig(fig, out_file1)\n\n    def inference_plots(self,\n                        run_data,\n                        run_params,\n                        runs_np=True,\n                        out_dir=None,\n                        energy_data=None):\n        \"\"\"Create trace + KDE plots of lattice observables and energy data.\"\"\"\n        type_str = 'figures_np' if runs_np else 
'figures_tf'\n figs_dir = os.path.join(self._log_dir, type_str)\n fig_dir = os.path.join(figs_dir, run_params['run_str'])\n io.check_else_make_dir(fig_dir)\n\n dataset = None\n energy_dataset = None\n try:\n fname, title_str, _ = self._plot_setup(run_params)\n except FileNotFoundError:\n return dataset, energy_dataset\n\n tp_fname = f'{fname}_traceplot'\n pp_fname = f'{fname}_posterior'\n rp_fname = f'{fname}_ridgeplot'\n\n dataset = self.build_dataset(run_data, run_params)\n\n tp_out_file = os.path.join(fig_dir, f'{tp_fname}.pdf')\n pp_out_file = os.path.join(fig_dir, f'{pp_fname}.pdf')\n\n var_names = ['tunneling_rate', 'plaqs_diffs']\n if hasattr(dataset, 'dx'):\n var_names.append('dx')\n var_names.extend(['accept_prob', 'charges_squared', 'charges'])\n\n tp_out_file_ = None\n pp_out_file_ = None\n if out_dir is not None:\n io.check_else_make_dir(out_dir)\n tp_out_file1 = os.path.join(out_dir, f'{tp_fname}.pdf')\n pp_out_file1 = os.path.join(out_dir, f'{pp_fname}.pdf')\n\n # -- Create traceplot + posterior plot of observables -----------\n self._plot_trace(dataset, tp_out_file,\n var_names=var_names,\n out_file1=tp_out_file1)\n\n self._plot_posterior(dataset, pp_out_file,\n var_names=var_names,\n out_file1=pp_out_file1)\n\n # * * * * * * * * * * * * * * * * *\n # Create ridgeplot of plaq diffs *\n # * * * * * * * * * * * * * * * * *\n rp_out_file = os.path.join(fig_dir, f'{rp_fname}.pdf')\n _ = az.plot_forest(dataset,\n kind='ridgeplot',\n var_names=['plaqs_diffs'],\n ridgeplot_alpha=0.4,\n ridgeplot_overlap=0.1,\n combined=False)\n fig = plt.gcf()\n fig.suptitle(title_str, fontsize='x-large', y=1.025)\n self._savefig(fig, rp_out_file)\n if out_dir is not None:\n rp_out_file1 = os.path.join(out_dir, f'{rp_fname}.pdf')\n self._savefig(fig, rp_out_file1)\n\n # -- Create traceplot + posterior plot of energy data --------------\n if energy_data is not None:\n energy_dataset = self.energy_plots(energy_data,\n fname, out_dir=out_dir)\n\n def energy_plots(self, energy_data, fname, out_dir=None):\n \"\"\"Plot energy data from inference run.\"\"\"\n energy_dataset = self.build_dataset(energy_data)\n epp_fname = f'{fname}_energy_posterior'\n etp_fname = f'{fname}_energy_traceplot'\n etp_out_file = os.path.join(fig_dir, f'{etp_fname}.pdf')\n epp_out_file = os.path.join(fig_dir, f'{epp_fname}.pdf')\n if out_dir is not None:\n etp_out_file1 = os.path.join(out_dir, f'{etp_fname}.pdf')\n epp_out_file1 = os.path.join(out_dir, f'{epp_fname}.pdf')\n self._plot_trace(energy_dataset, etp_out_file)\n self._plot_posterior(energy_dataset, epp_out_file)\n\n return energy_dataset\n","sub_path":"l2hmc-qcd/utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":40600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"408284877","text":"import os\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\nfrom data_loader import AudioLoader\nfrom model import ResNet\nfrom spect_loss import SpectLoss\n\nif torch.cuda.is_available():\n device = torch.device('cuda:0')\nelse:\n device = torch.device('cpu')\n\nds = AudioLoader(\n base_path='/home/aj/repo/snippets/audio_super_resolution/resources/wav',\n json_file_list='resources/duration_list_prune.json',\n n_samples=32000,\n n_mask=80,\n noise_file='resources/noise/whitenoisegaussian.wav'\n)\ndl = DataLoader(ds, batch_size=8, num_workers=8, pin_memory=True)\n\nmodel = ResNet(n_layers=[3, 5, 5, 3], 
init_planes=32)\nmodel.to(device)\n\nlog = SummaryWriter('log_spect')\n\ncriterion1 = torch.nn.SmoothL1Loss(beta=.01)\ncriterion2 = SpectLoss(lowest_bin=1).to(device)\n\noptimizer = torch.optim.Adam(model.parameters(), lr=.001)\nscheduler = torch.optim.lr_scheduler.StepLR(\n optimizer,\n step_size=3,\n gamma=.5\n)\n\ncounter = 0\nfor e in range(1000):\n print('-' * 20 + f'epoch: {e+1:02d}' + '-' * 20)\n for x, y in tqdm(dl):\n x = x.to(device)\n y = y.to(device)\n out = model(x)\n\n loss1 = criterion1(out, y)\n loss2 = criterion2(out, y)\n loss = .95 * loss1 + .05 * loss2\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n log.add_scalar('loss1', loss1.item(), counter)\n log.add_scalar('loss2', loss2.item(), counter)\n log.add_scalar('loss', loss.item(), counter)\n counter += 1\n torch.save(\n model.state_dict(),\n f'models/asr_e{e+1:02d}.pth'\n )\n scheduler.step()\n","sub_path":"audio_super_resolution/train_spect.py","file_name":"train_spect.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"56202010","text":"from torchvision.models.detection.faster_rcnn import fasterrcnn_resnet50_fpn\nimport torch, cv2, base64\nimport numpy as np\n\n\nCOCO_INSTANCE_CATEGORY_NAMES = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',\n 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',\n 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\n 'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\n 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',\n 'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',\n 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',\n 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n]\n\nmodel = fasterrcnn_resnet50_fpn(pretrained=True)\nmodel.eval()\n\ndef cv2_to_tensor(image):\n image = torch.from_numpy(image)\n image = image.float() / torch.max(image)\n image = image.permute(2, 0, 1)\n image = image.unsqueeze(0)\n return image\n\ndef show_image(src):\n cv2.imshow('window', src)\n cv2.waitKey(0)\n\ndef add_border(image):\n border_vertical = int(image.shape[0] * 0.05)\n border_horizontal = int(image.shape[1] * 0.05)\n image = cv2.copyMakeBorder(\n src = image,\n top = border_vertical,\n bottom = border_vertical,\n left = border_horizontal,\n right = border_horizontal,\n borderType = cv2.BORDER_CONSTANT\n )\n return image\n\ndef process_predictions(output, threshold=0.70):\n boxes = output[0]['boxes']\n labels = output[0]['labels']\n scores = output[0]['scores']\n predictions = []\n for idx in range(len(scores)):\n if scores[idx] >= threshold:\n predictions.append({\n 'box': boxes[idx],\n 'label': COCO_INSTANCE_CATEGORY_NAMES[labels[idx]],\n 'score': scores[idx]\n })\n if len(predictions) is 0:\n idx = scores.index(max(scores))\n predictions.append({\n 'box': boxes[idx],\n 'label': COCO_INSTANCE_CATEGORY_NAMES[labels[idx]],\n 'score': scores[idx]\n })\n return predictions\n\ndef draw_boxes(image, predictions):\n size_factor = 
min(image.shape[:-1]) // 500 + 1\n for obj in predictions: \n image = cv2.rectangle(\n img = image,\n pt1 = (int(obj['box'][0].round()), int(obj['box'][1].round())),\n pt2 = (int(obj['box'][2].round()), int(obj['box'][3].round())),\n color = (0, 0, 255),\n thickness = size_factor\n )\n image = cv2.putText(\n img = image,\n text = obj['label'],\n org = (obj['box'][0], obj['box'][1] - 5),\n fontFace = cv2.FONT_HERSHEY_PLAIN,\n fontScale = size_factor,\n color = (0, 0, 255),\n thickness = size_factor\n )\n return image\n\ndef decode_image_file(image_file):\n filestr = image_file.read()\n npimg = np.fromstring(filestr, np.uint8)\n image = cv2.imdecode(npimg, cv2.IMREAD_COLOR)\n return image\n\ndef detect(image_file):\n image = decode_image_file(image_file)\n image = add_border(image)\n tensor = cv2_to_tensor(image)\n output = model(tensor)\n predictions = process_predictions(output)\n image = draw_boxes(image, predictions)\n retval, buffer = cv2.imencode('.png', image)\n data_uri = base64.b64encode(buffer).decode('ascii')\n return data_uri, predictions\n\nif __name__ == \"__main__\":\n image = cv2.imread('example_input.jpg', cv2.IMREAD_COLOR)\n image = add_border(image)\n tensor = cv2_to_tensor(image)\n output = model(tensor)\n predictions = process_predictions(output)\n print(predictions)\n image = draw_boxes(image, predictions)\n show_image(image)\n \n \n","sub_path":"flask/app/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"65257654","text":"from scipy.io import loadmat\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.metrics import accuracy_score\n\nmat = loadmat(\"twoClassData.mat\")\nprint(mat.keys())\nX = mat['X']\ny = mat['y'].ravel()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)\n\nclf = KNeighborsClassifier()\nclf.fit(X_train, y_train)\ny_prd = clf.predict(X_test)\nscore = accuracy_score(y_test, y_prd)\nprint('KNN SCORE: ' + str(score))\n\nclf = LinearDiscriminantAnalysis()\nclf.fit(X_train, y_train)\ny_prd = clf.predict(X_test)\nscore = accuracy_score(y_test, y_prd)\nprint('KNN SCORE: ' + str(score))\n\n","sub_path":"ex2/task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"131504059","text":"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport scipy.spatial.distance\n\nimport imghdr\nimport numpy\nimport os\nimport scipy.misc\n\n\ndataDirectory = \"faces/trainingSet\"\nvariance = 0.95\n\ndef enumerateImagePaths(dataDirectory1):\n filenames = list()\n for root, _, files in os.walk(dataDirectory1):\n path = root.split('/')\n for f in files:\n filename = os.path.join(root, f)\n if imghdr.what(filename):\n filenames.append(filename)\n return filenames\n\ndef printTitle(titleToPrint):\n print(\"\\n\\n############################################\\n%s\\n\" % titleToPrint)\n\ndef compareVisualyFace(faceList):\n index = 1\n f = plt.figure()\n for name in faceList:\n f.add_subplot(len(faceList), 2, index)\n index += 1\n plt.imshow(scipy.misc.imread(name.split(\" \")[0]), cmap=plt.cm.Greys_r)\n f.add_subplot(len(faceList), 2, index)\n index += 1\n plt.imshow(scipy.misc.imread(name.split(\" \")[1]), cmap=plt.cm.Greys_r)\n plt.show()\n\n\ndef 
printResults(title, facesList):\n printTitle(title)\n \n print(\"%25s %25s %20s\" % (\"Face\",\"Recognized as\",\"Distance\"))\n for nameStr in facesList:\n print(\"%25s %25s %20s\" % (nameStr.split(\" \")[0].split(\"/\")[-1], nameStr.split(\" \")[1].split(\"/\")[-1], nameStr.split(\" \")[2]))\n compareVisualyFace(facesList)\n\n\n\ndef calcAverageFace(trainingSet):\n avgFace = numpy.zeros(trainingSet[0].shape)\n \n for image in trainingSet:\n avgFace += image/float(len(trainingSet))\n \n return avgFace.astype(int)\n\n\nfilenames = enumerateImagePaths(dataDirectory)\ntrainingImageNames = filenames\nnumTrainingFaces = len(trainingImageNames)\n\nmaxWeight = 0\n\n# Calc average face - normalize database\n\ntrainingImages = list()\n\nfor name in trainingImageNames:\n trainingImages.append( scipy.misc.imread(name) )\n\naverageFace = calcAverageFace(trainingImages)\ntrainingImages = [ image - averageFace for image in trainingImages ] \n\n# Calculate eigenvectors\n\nx,y = trainingImages[0].shape\nn = x*y\nA = numpy.matrix( numpy.zeros((n,numTrainingFaces)) )\n\nfor i,image in enumerate(trainingImages):\n A[:,i] = numpy.reshape(image,(n,1))\n\nM = A.transpose()*A\neigenvalues, eigenvectors = numpy.linalg.eig(M)\nindices = eigenvalues.argsort()[::-1]\neigenvalues = eigenvalues[indices]\neigenvectors = eigenvectors[:,indices]\n\neigenvalueSum = sum(eigenvalues)\npartialSum = 0.0\nnumEffectiveEigenvalues = 0\n\nfor index,eigenvalue in enumerate(eigenvalues):\n partialSum += eigenvalue\n if partialSum / eigenvalueSum >= variance:\n numEffectiveEigenvalues = index+1\n break\n\nV = numpy.matrix( numpy.zeros((n,numEffectiveEigenvalues)) )\nfor i in range(numEffectiveEigenvalues):\n V[:,i] = A*eigenvectors[:,i].real\n\n# Calc weights\n\npersonWeights = dict()\n\nfor name in filenames:\n image = scipy.misc.imread(name)\n\n image = image - averageFace \n\n weights = list()\n\n for i in range(numEffectiveEigenvalues):\n weights.append( (V[:,i].transpose() * image.reshape((n,1))).tolist()[0][0] )\n \n if maxWeight < max(weights):\n maxWeight = max(weights)\n \n personWeights[name] = weights\n\n# End Training\n\n# Start recognition\n\n# TODO: Add path and filenames\nunknownDirectory = \"faces/unknown\"\nfilesToRecognize = enumerateImagePaths(unknownDirectory)\noverThreshold = list()\nnotRecognized = list()\nrecognizedFaces = list()\n\n\n#tempT = list() \n\nfor nameToRecognize in filesToRecognize:\n image = scipy.misc.imread(nameToRecognize) \n\n # Convert UNKNOWN face to FaceVector\n # Normalize vector by image - averageFace\n image = image - averageFace\n\n unknownWeights = list()\n d = maxWeight \n resultName = str()\n\n # Convert normalized vector to eigenspace\n # Create weights\n for i in range(numEffectiveEigenvalues):\n unknownWeights.append( (V[:,i].transpose() * image.reshape((n,1))).tolist()[0][0] )\n\n for name,weights in personWeights.iteritems():\n # Calculate d = ||W - Wk||^2\n tempD = scipy.spatial.distance.euclidean(unknownWeights , weights)\n if tempD < d:\n d = tempD\n resultName = name\n\n #tempT.append(d)\n\n # TODO: How calc threshold???\n # d < threshold then face K [print filename - recognized person]\n if d < 353495804:\n recognizedFaces.append(\"%s %s %d\" % (nameToRecognize, resultName, d))\n elif d > 353495803 and d < 635087668:\n notRecognized.append(\"%s %s %d\" % (nameToRecognize, resultName, d))\n else:\n overThreshold.append(\"%s %s %d\" % (nameToRecognize, resultName, d))\n\n\nprintResults(\"Recognized\", recognizedFaces)\nprintResults(\"NOT Recognized\", 
notRecognized)\nprintResults(\"Totaly not recognized\", overThreshold)\n\nprint(\"\\n\\nRecognized: %d/%d\" % (len(recognizedFaces), len(filesToRecognize)))\n#print(sorted(tempT, key=int))\n","sub_path":"script/eigenfaces.py","file_name":"eigenfaces.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"484247339","text":"from app import app, db\r\nfrom flask import render_template, redirect, url_for, flash, request\r\nfrom app.models import Products\r\nfrom app.forms import CreateProduct, EditProduct\r\n\r\n\r\n@app.route('/')\r\n@app.route('/index')\r\ndef index():\r\n return redirect(url_for('products'))\r\n\r\n\r\n@app.route('/products', methods=['GET', 'POST'])\r\ndef products():\r\n form = CreateProduct()\r\n if form.validate_on_submit():\r\n new_product = Products(name=form.name.data, price=form.price.data, image=form.image.data)\r\n db.session.add(new_product)\r\n db.session.commit()\r\n flash('Product added')\r\n return redirect(url_for('products', _anchor=\"msg\"))\r\n\r\n page = request.args.get('page', 1, type=int)\r\n\r\n #Query the DB to get all items (ascending order) and adding SQLAlchemy pagination\r\n products = Products.query.order_by(Products.price.asc()).paginate(page, app.config['PRODUCTS_PER_PAGE'], False)\r\n\r\n #Create next or prev links\r\n next_url = url_for('products', page=products.next_num) if products.has_next else None\r\n prev_url = url_for('products', page=products.prev_num) if products.has_prev else None\r\n\r\n return render_template('products.html', form=form,\r\n products=products,\r\n next_url=next_url,\r\n prev_url=prev_url)\r\n\r\n\r\n\r\n@app.route('/products/id/', methods=['GET', 'POST'])\r\ndef product_page(id):\r\n\r\n # query the DB for the product matching the ID. Returns 404 if id not found\r\n product = Products.query.filter_by(id=id).first_or_404()\r\n\r\n form = EditProduct()\r\n if form.validate_on_submit():\r\n product.name = form.name.data\r\n product.price = form.price.data\r\n product.image = form.image.data\r\n db.session.commit()\r\n return redirect(url_for('product_page', id=product.id))\r\n elif request.method == 'GET':\r\n form.name.data = product.name\r\n form.price.data = product.price\r\n form.image.data = product.image\r\n\r\n return render_template('product_page.html', product=product, form=form)\r\n\r\n\r\n@app.route('/products/delete//delete', methods=['POST'])\r\ndef product_deletion(id):\r\n delete_product = Products.query.get_or_404(id)\r\n db.session.delete(delete_product)\r\n db.session.commit()\r\n flash('Product deleted')\r\n\r\n return redirect(url_for('products'))\r\n\r\n\r\n@app.route('/products/kids')\r\ndef kids():\r\n\r\n #08-04-2019 --> No products in the API matched \"kids\" or \"kid_adult\"\r\n\r\n return \"
Nothing here
\"\r\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"369810393","text":"################################################################################\n# Implement the simple monitor module #\n# #\n# $./pyretic.py -v low -m r0 simple_monitor #\n################################################################################\n\nfrom pyretic.lib.corelib import *\nfrom pyretic.core import packet\nfrom pyretic.lib.query import *\n\n#mac1 = EthAddr('C0:FF:EE:00:BA:BE')\n#ip1 = IPAddr('123.45.67.89')\nip_h2 = IPAddr('10.0.0.2')\nip_srv = IPAddr('10.0.0.4')\n# DONE\n# mon prints every access to the ssh service on srv as well as all traffic from h2\nssh_to_mon = match(dstip=ip_srv, dstport=9090, ethtype=packet.IPV4, protocol=packet.TCP_PROTO) >> fwd(3)\nh2_to_mon = match(srcip=ip_h2) >> fwd(3)\nmonitor = ssh_to_mon + h2_to_mon\ndef main():\n return monitor\n","sub_path":"sdn-assign-05/remote_sshfs_ex5/simple_monitor.py","file_name":"simple_monitor.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"126694293","text":"\ndef maj_elem_io(in_file, out_file):\n f = open(in_file, 'r')\n num_arrs, arr_size = f.readline().strip().split(\" \")\n f2 = open(out_file, 'w')\n for i in range(int(num_arrs)):\n arr = f.readline().strip().split(\" \")\n arr = [int(x) for x in arr]\n ret = maj_elem(arr, int(arr_size))\n f2.write(str(ret))\n f2.write(\" \")\n\ndef maj_elem(arr, arr_size):\n counts = {}\n for elem in arr:\n counts[elem] = counts.get(elem, 0)\n counts[elem] += 1\n if counts[elem] > arr_size/2:\n return elem\n\n return -1\n","sub_path":"majority_element/majority_elem.py","file_name":"majority_elem.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"283396457","text":"# Copyright (C) 2015 Catalyst IT Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom adjutant.actions.v1.base import BaseAction, ProjectMixin, QuotaMixin\nfrom adjutant.actions.utils import validate_steps\nfrom adjutant.common import openstack_clients, user_store\nfrom adjutant.api import models\nfrom adjutant.common.quota import QuotaManager\n\nfrom django.utils import timezone\nfrom django.conf import settings\n\nfrom datetime import timedelta\n\n\nclass NewDefaultNetworkAction(BaseAction, ProjectMixin):\n \"\"\"\n This action will setup all required basic networking\n resources so that a new user can launch instances\n right away.\n \"\"\"\n\n required = [\n 'setup_network',\n 'project_id',\n 'region',\n ]\n\n def __init__(self, *args, **kwargs):\n super(NewDefaultNetworkAction, self).__init__(*args, **kwargs)\n\n def _validate_region(self):\n if not self.region:\n self.add_note('ERROR: No region given.')\n return False\n\n id_manager = user_store.IdentityManager()\n region = id_manager.get_region(self.region)\n if not region:\n self.add_note('ERROR: Region does not exist.')\n return False\n\n self.add_note('Region: %s exists.' % self.region)\n return True\n\n def _validate_defaults(self):\n defaults = self.settings.get(self.region, {})\n\n if not defaults:\n self.add_note('ERROR: No default settings for region %s.' %\n self.region)\n return False\n return True\n\n def _validate(self):\n self.action.valid = validate_steps([\n self._validate_region,\n self._validate_project_id,\n self._validate_defaults,\n self._validate_keystone_user_project_id,\n ])\n self.action.save()\n\n def _create_network(self):\n neutron = openstack_clients.get_neutronclient(region=self.region)\n defaults = self.settings.get(self.region, {})\n\n if not self.get_cache('network_id'):\n try:\n network_body = {\n \"network\": {\n \"name\": defaults['network_name'],\n 'tenant_id': self.project_id,\n \"admin_state_up\": True\n }\n }\n network = neutron.create_network(body=network_body)\n except Exception as e:\n self.add_note(\n \"Error: '%s' while creating network: %s\" %\n (e, defaults['network_name']))\n raise\n self.set_cache('network_id', network['network']['id'])\n self.add_note(\"Network %s created for project %s\" %\n (defaults['network_name'],\n self.project_id))\n else:\n self.add_note(\"Network %s already created for project %s\" %\n (defaults['network_name'],\n self.project_id))\n\n if not self.get_cache('subnet_id'):\n try:\n subnet_body = {\n \"subnet\": {\n \"network_id\": self.get_cache('network_id'),\n \"ip_version\": 4,\n 'tenant_id': self.project_id,\n 'dns_nameservers': defaults['DNS_NAMESERVERS'],\n \"cidr\": defaults['SUBNET_CIDR']\n }\n }\n subnet = neutron.create_subnet(body=subnet_body)\n except Exception as e:\n self.add_note(\n \"Error: '%s' while creating subnet\" % e)\n raise\n self.set_cache('subnet_id', subnet['subnet']['id'])\n self.add_note(\"Subnet created for network %s\" %\n defaults['network_name'])\n else:\n self.add_note(\"Subnet already created for network %s\" %\n defaults['network_name'])\n\n if not self.get_cache('router_id'):\n try:\n router_body = {\n \"router\": {\n \"name\": defaults['router_name'],\n \"external_gateway_info\": {\n \"network_id\": defaults['public_network']\n },\n 'tenant_id': self.project_id,\n \"admin_state_up\": True\n }\n }\n router = neutron.create_router(body=router_body)\n except Exception as e:\n self.add_note(\n \"Error: '%s' while creating router: %s\" %\n (e, defaults['router_name']))\n raise\n 
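# cache the created router id so a re-run of this action skips re-creation (idempotent, like the network and subnet steps above)\n            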
self.set_cache('router_id', router['router']['id'])\n self.add_note(\"Router created for project %s\" %\n self.project_id)\n else:\n self.add_note(\"Router already created for project %s\" %\n self.project_id)\n\n if not self.get_cache('port_id'):\n try:\n interface_body = {\n \"subnet_id\": self.get_cache('subnet_id')\n }\n interface = neutron.add_interface_router(\n self.get_cache('router_id'), body=interface_body)\n except Exception as e:\n self.add_note(\n \"Error: '%s' while attaching interface\" % e)\n raise\n self.set_cache('port_id', interface['port_id'])\n self.add_note(\"Interface added to router for subnet\")\n else:\n self.add_note(\n \"Interface added to router for project %s\" % self.project_id)\n\n def _pre_approve(self):\n # Note: Do we need to get this from cache? it is a required setting\n # self.project_id = self.action.task.cache.get('project_id', None)\n self._validate()\n\n def _post_approve(self):\n self._validate()\n\n if self.setup_network and self.valid:\n self._create_network()\n\n def _submit(self, token_data):\n pass\n\n\nclass NewProjectDefaultNetworkAction(NewDefaultNetworkAction):\n \"\"\"\n A variant of NewDefaultNetwork that expects the project\n to not be created until after post_approve.\n \"\"\"\n\n required = [\n 'setup_network',\n 'region',\n ]\n\n def _pre_validate(self):\n # Note: Don't check project here as it doesn't exist yet.\n self.action.valid = validate_steps([\n self._validate_region,\n self._validate_defaults,\n ])\n self.action.save()\n\n def _validate(self):\n self.action.valid = validate_steps([\n self._validate_region,\n self._validate_project_id,\n self._validate_defaults,\n ])\n self.action.save()\n\n def _pre_approve(self):\n self._pre_validate()\n\n def _post_approve(self):\n self.project_id = self.action.task.cache.get('project_id', None)\n self._validate()\n\n if self.setup_network and self.valid:\n self._create_network()\n\n\nclass UpdateProjectQuotasAction(BaseAction, QuotaMixin):\n \"\"\" Updates quota for a project to a given size in a list of regions \"\"\"\n\n required = [\n 'size',\n 'project_id',\n 'regions',\n ]\n\n default_days_between_autoapprove = 30\n\n def __init__(self, *args, **kwargs):\n super(UpdateProjectQuotasAction, self).__init__(*args, **kwargs)\n self.size_difference_threshold = settings.TASK_SETTINGS.get(\n self.action.task.task_type, {}).get(\n 'size_difference_threshold')\n\n def _get_email(self):\n\n if settings.USERNAME_IS_EMAIL:\n return self.action.task.keystone_user['username']\n else:\n id_manager = user_store.IdentityManager()\n user = id_manager.users.get(self.keystone_user['user_id'])\n email = user.email\n if email:\n return email\n\n self.add_note(\"User email address not set.\")\n return None\n\n def _validate_quota_size_exists(self):\n size_list = settings.PROJECT_QUOTA_SIZES.keys()\n if self.size not in size_list:\n self.add_note(\"Quota size: %s does not exist\" % self.size)\n return False\n return True\n\n def _set_region_quota(self, region_name, quota_size):\n # Set the quota for an individual region\n quota_settings = settings.PROJECT_QUOTA_SIZES.get(quota_size, {})\n if not quota_settings:\n self.add_note(\n \"Project quota not defined for size '%s' in region %s.\" % (\n quota_size, region_name))\n return\n\n quota_manager = QuotaManager(self.project_id,\n self.size_difference_threshold)\n\n quota_manager.set_region_quota(region_name, quota_settings)\n\n self.add_note(\"Project quota for region %s set to %s\" % (\n region_name, quota_size))\n\n def _can_auto_approve(self):\n wait_days 
= self.settings.get('days_between_autoapprove',\n self.default_days_between_autoapprove)\n task_list = models.Task.objects.filter(\n completed_on__gte=timezone.now() - timedelta(days=wait_days),\n task_type__exact=self.action.task.task_type,\n cancelled__exact=False,\n project_id__exact=self.project_id)\n\n changed_in_period = False\n # Check to see if there have been any updates in the relavent regions\n # recently\n for task in task_list:\n for action in task.actions:\n intersect = set(action.action_data[\n 'regions']).intersection(self.regions)\n if intersect:\n changed_in_period = True\n\n region_sizes = []\n\n quota_manager = QuotaManager(self.project_id,\n self.size_difference_threshold)\n\n for region in self.regions:\n current_size = quota_manager.get_region_quota_data(\n region, include_usage=False)['current_quota_size']\n region_sizes.append(current_size)\n self.add_note(\n \"Project has size '%s' in region: '%s'\" %\n (current_size, region))\n\n # Check for preapproved_quotas\n preapproved_quotas = []\n smaller_quotas = []\n\n # If all region sizes are the same\n if region_sizes.count(region_sizes[0]) == len(region_sizes):\n preapproved_quotas = quota_manager.get_quota_change_options(\n region_sizes[0])\n smaller_quotas = quota_manager.get_smaller_quota_options(\n region_sizes[0])\n\n if self.size in smaller_quotas:\n self.add_note(\n \"Quota size '%s' is in list of smaller quotas: %s\" %\n (self.size, smaller_quotas))\n return True\n\n if changed_in_period:\n self.add_note(\n \"Quota has already been updated within the auto \"\n \"approve time limit.\")\n return False\n\n if self.size not in preapproved_quotas:\n self.add_note(\n \"Quota size '%s' not in preapproved list: %s\" %\n (self.size, preapproved_quotas))\n return False\n\n self.add_note(\n \"Quota size '%s' in preapproved list: %s\" %\n (self.size, preapproved_quotas))\n return True\n\n def _validate(self):\n # Make sure the project id is valid and can be used\n self.action.valid = validate_steps([\n self._validate_project_id,\n self._validate_quota_size_exists,\n self._validate_regions_exist,\n self._validate_usage_lower_than_quota,\n ])\n self.action.save()\n\n def _pre_approve(self):\n self._validate()\n # Set auto-approval\n self.set_auto_approve(self._can_auto_approve())\n\n def _post_approve(self):\n self._validate()\n\n if not self.valid or self.action.state == \"completed\":\n return\n\n # Use manager here instead, it will make it easier to add has_more\n # in later\n for region in self.regions:\n self._set_region_quota(region, self.size)\n\n self.action.state = \"completed\"\n self.action.task.cache['project_id'] = self.project_id\n self.action.task.cache['size'] = self.size\n\n self.action.save()\n\n def _submit(self, token_data):\n \"\"\"\n Nothing to do here. 
Everything is done at post_approve.\n \"\"\"\n pass\n\n\nclass SetProjectQuotaAction(UpdateProjectQuotasAction):\n \"\"\" Updates quota for a given project to a configured quota level \"\"\"\n required = []\n\n def _get_email(self):\n return None\n\n def _validate(self):\n # Make sure the project id is valid and can be used\n self.action.valid = validate_steps([\n self._validate_project_id,\n ])\n self.action.save()\n\n def _pre_approve(self):\n # Nothing to validate yet\n self.action.valid = True\n self.action.save()\n\n def _post_approve(self):\n # Assumption: another action has placed the project_id into the cache.\n self.project_id = self.action.task.cache.get('project_id', None)\n self._validate()\n\n if not self.valid or self.action.state == \"completed\":\n return\n\n # update quota for each openstack service\n regions_dict = self.settings.get('regions', {})\n for region_name, region_settings in regions_dict.items():\n quota_size = region_settings.get('quota_size')\n self._set_region_quota(region_name, quota_size)\n\n self.action.state = \"completed\"\n self.action.save()\n\n def _submit(self, token_data):\n pass\n","sub_path":"adjutant/actions/v1/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":14366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"2111295","text":"# Copyright 2016-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tests.utils\nfrom tests.utils import dedent\n\n\nclass PythonLibraryTest(tests.utils.TestCase):\n includes = [(\"@fbcode_macros//build_defs:python_library.bzl\", \"python_library\")]\n\n @tests.utils.with_project()\n def test_python_library_parses(self, root):\n root.addFile(\n \"BUCK\",\n dedent(\n \"\"\"\n load(\"@fbcode_macros//build_defs:python_library.bzl\", \"python_library\")\n python_library(\n name = \"my_python_lib\",\n base_module = \"terrible.feature\",\n cpp_deps = [\"//:cpp_lib\"],\n deps = [\"//:python_lib\"],\n external_deps = [\n \"pyyaml\",\n (\"six\", None, \"six\"),\n ],\n gen_srcs = [\"//:deprecated=feature.py\"],\n py_flavor = \"\",\n resources = {\"src.py\": \"dest.py\"},\n srcs = [\n \"not_a_python_source\",\n \"src.py\",\n ],\n tags = [\"foo\"],\n tests = [\"//:python_lib_test\"],\n typing = True,\n typing_options = \"pyre\",\n versioned_srcs = [(\">2\", [\"new.py\"])],\n )\n \"\"\"\n ),\n )\n\n self.assertSuccess(root.runAudit([\"BUCK\"]))\n","sub_path":"infra_macros/fbcode_macros/tests/python_library_test.py","file_name":"python_library_test.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"360522629","text":"fin = open('numbers.in')\r\nfout = open('numbers.out', 'w')\r\nn = int(fin.readline().strip())\r\nset_first = set()\r\nconn = dict()\r\nfor i in range(n):\r\n fr, m = fin.readline().split()\r\n m = int(m)\r\n set_first.add(fr)\r\n conn[fr] = set()\r\n for j in range(m):\r\n conn[fr].add(fin.readline().strip())\r\nn = int(fin.readline().strip())\r\nfor k in range(n):\r\n s = fin.readline().strip()\r\n if s[0] == '0':\r\n print('Incorrect', file = fout)\r\n continue\r\n check = False\r\n for i in range(1, 4):\r\n if s[i] != 
'0' and s[:i] in set_first:\r\n fr = s[:i]\r\n for j in range(i + 3, i + 6):\r\n if s[j] != '0' and s[i:j] in conn[fr]:\r\n print('+', fr, '(', s[i:j], ')', sep = '', end = '', file = fout)\r\n ost = 11 - j\r\n if ost == 3:\r\n print(s[j:], file = fout)\r\n elif ost == 4:\r\n print(s[j:j + 2], s[j + 2:], sep = '-', file = fout)\r\n elif ost == 5:\r\n print(s[j:j + 3], s[j + 3:], sep = '-', file = fout)\r\n elif ost == 6:\r\n print(s[j:j + 2], s[j + 2:j + 4], s[j + 4:], sep = '-', file = fout)\r\n else:\r\n print(s[j:j + 3], s[j + 3:j + 5], s[j + 5:], sep = '-', file = fout)\r\n check = True\r\n break\r\n if check:\r\n break\r\n if not check:\r\n print('Incorrect', file = fout)\r\nfout.close()","sub_path":"data/russia-team-e/sources/220-e-3.py","file_name":"220-e-3.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"192935906","text":"# coding=gbk\nfrom app import app, db\nfrom app.models import User, Contest, Request, Student, Teacher, Team, Award, team_student, Notice\nimport random\n\n# 添加数据专用\n\nname_list = ['赵泽晨', '赵子桐', '赵建川', '赵琦锐', '赵家妍', '赵宇琪', '赵佳鑫', '赵语彤', '赵紫睿', '赵天琪', '赵懿轩', '赵子鑫', '赵谨瑶', '赵玉涵', '赵俊辰', '赵泊君', '赵无名', '赵书瑶', '赵若涵', '赵涵睿', '赵栾涵', '赵宇涵', '赵涵恩', '赵韶涵', '赵语涵', '赵杰', '赵浩航', '赵绎涵', '赵靖巍', '赵楷', '赵鹏奕', '赵煦', '赵煦恺', '赵子瑜', '赵凯瑞', '赵逸霏', '赵振', '赵振楦', '赵振暄', '赵振煊', '赵振璇', '赵振萱', '赵乐珊', '赵晶婧', '赵婧晶', '赵婧婧', '赵旭玉', '赵婧玉', '赵瑾瑜', '赵静怡', '赵婧怡', '赵婧瑜', '赵闯', '赵美玲', '赵晨怡', '赵建宇', '赵铁皖', '赵麓丞', '赵骧鑫', '赵誉惘', '赵略竖', '赵太逖', '赵内驱', '赵内藩', '赵斓夷', '赵若彤', '赵亦俊', '赵奕俊', '赵建伟', '赵暖暖', '赵仡浚', '赵屹浚', '赵奕浚', '赵奕骏', '赵屹骏', '赵仡骏',\n '王满', '王琳', '王锐', '王艺博', '王伟', '王辰硕', '王鸿轩', '王涵润', '王涵涵', '王兴', '王淳曦', '王雨微', '王钧涵', '王浩晏', '王芊语', '王乐怡', '王皓月', '王文田', '王文田', '王文田', '王禹勋', '王思卓', '王国珍', '王建', '王天佑', '王昕', '王玥婷', '王浩南', '王玥雯', '王天睿', '王雨辰', '王靖雯', '王镜雯', '王翠楠', '王镜文', '王静文', '王静雯', '王楠', '王之骏', '王子骏', '王九雏', '王韬茫', '王麒鄄', '王丹', '王椒勃', '王泊君', '王诗议', '王思馨', '王誉涵', '王思思', '王梦菲', '王贤博', '王博琨', '王复贤', '王博贤', '王博毅', '王博逸', '王肇博', '王博儒', '王傲野', '王韬韧', '王傲瑜', '王傲璇', '王傲煦', '王傲昭', '王韬博', '王傲琬', '王傲玥', '王傲���', '王傲漾',\n '刘佳乐', '刘慧娴', '刘嘉源', '刘盈锐', '刘德华', '刘娜', '刘欣玥', '刘晗玥', '刘益嘉', '刘如玥', '刘兆祥', '刘永昌', '刘泽林', '刘国佩', '刘佳绮', '刘哲宇', '刘佳晰', '刘佳琦', '刘长鑫', '刘佳萱', '刘思哲', '刘长洪', '刘长润', '刘宸旭', '刘毅', '刘森柱', '刘永胜', '刘昊天', '刘新奇', '刘文军', '刘鑫源', '刘永旭', '刘希岭', '刘希玲', '刘国荣', '刘永震', '刘新琦', '刘一越', '刘一玑', '刘越', '刘慕瑶', '刘雨林', '刘尚直', '刘尚猛', '刘竞朗', '刘轩铭', '刘辕铭', '刘约礼', '刘金霞', '刘菡卿', '刘红', '刘梓恒', '刘智赟', '刘应琴', '刘帥希', '刘宇希',\n '张观博', '张欣竹', '张欣阳', '张刚军', '张扬阳', '张靖阳', '张熙阳', '张嘉萱', '张铭阳', '张飞', '张雨荨', '张文博', '张诗含', '张诗若', '张辰海', '张晓雨', '张展鸣', '张晓春', '张洪文', '张默', '张轩杰', '张金海', '张俊杰', '张展旭', '张建烁', '张婧琪', '张婧涵', '张诗晴', '张传浩', '张怡萍', '张诗涵', '张雅婷', '张雅涵', '张萍', '张晓萍', '张兴飞', '张小平', '张建龙', '张宇谟', '张子辰', '张辰', '张湍灵', '张骅株', '张春莲', '张娟敏', '张智涵', '张欣妍', '张慧妍', '张雅静', '张月婷', '张雨婷', '张芸馨', '张韵涵', '张涵韵', '张雨欣', '张馨蕾', '张静媛', '张子涵', '张雨泽', '张静蕾', '张茛淯', '张珑沧', '张芮娟', '张梓萱', '张轶诚', '张嘉文', '张晓朋', '张一凡', '张昊楠', '张浩楠', '张瑞君', '张佳宁', '张雨杨', '张昊然', '张浩然', '张滕浩', '张雨菡', '张海一', '张晨宸', '张之政', '张晨菲', '张修闻', '张宁夫', '张轩',\n '杨文锦', '杨泽晨', '杨博瀚', '杨伊珂', '杨子桐', '杨雨桐', '杨雅涵', '杨建川', '杨琦锐', '杨琦炜', '杨子瑾', '杨子辰', '杨炳', '杨鸣鹤', '杨景宜', '杨乐乐', '杨雨潼', '杨涛了', '杨淼', '杨铭', '杨宇欣', '杨丽华', '杨旭', '杨旭芳', '杨亚悫', '杨亚兰', '杨子一', '杨海辰', '杨君浩', '杨焙元', '杨文博', '杨金鹏', '杨荣', '杨坤', '杨绍文', '杨换', '杨曦', '杨浩然', '杨铭羽', '杨浩宇', '杨思辰', '杨悦熙', '杨海英', '杨艾潼', '杨惟岚', '杨蓓', '杨馨媛', '杨佩林', '杨佩云', '杨子琦', '杨泽硕', '杨泽涛', '杨涛', '杨国涛', '杨雅洁', '杨静涵', '杨帆', '杨若雪', '杨淑颖', '杨倩雪', '杨漫妮', '杨锋', '杨睿渊']\n\n\ndef 
add_student(start, end):\n ids = range(start, end)\n password = 1\n types = ['机械工程', '软件工程', '工业工程', '自动化', '电子信息工程', '汽车服务工程']\n for id in ids:\n major_types = random.choice(types)\n tel = random.randint(13000000000,19000000000)\n # print(major_types, tel)\n # name = ''.join(random.sample(\n # ['z', 'y', 'x', 'w', 'v', 'u', 't', 's', 'r', 'q', 'p', 'o', 'n', 'm', 'l', 'k', 'j', 'i', 'h', 'g', 'f',\n # 'e', 'd', 'c', 'b', 'a'], 5))\n username = random.choice(name_list)\n stu = Student(user_id=id,major_in=major_types,tel_num=tel, username=username)\n stu.set_password(str(password))\n db.session.add(stu)\n # stu = Student.query.get(id)\n # print(stu.username)\n db.session.commit()\n\n\ndef edit_user(start, end):\n for i in range(start, end):\n user = User.query.get(i)\n if user:\n user.username = random.choice(name_list)\n db.session.commit()\n\n\ndef add_teacher(start, end):\n ids = range(start, end)\n password = 1\n # types = ['机械工程', '软件工程', '工业工程', '自动化', '电子信息工程', '汽车服务工程']\n for id in ids:\n # major_types = random.choice(types)\n tel = random.randint(13000000000,19000000000)\n types = random.randint(0,1)\n # print(major_types, tel)\n name = ''.join(random.sample(\n ['z', 'y', 'x', 'w', 'v', 'u', 't', 's', 'r', 'q', 'p', 'o', 'n', 'm', 'l', 'k', 'j', 'i', 'h', 'g', 'f',\n 'e', 'd', 'c', 'b', 'a'], 5))\n username = 'tea_' + name\n email = str(id) + '@test.com'\n stu = Teacher(user_id=id, tea_type=types,tel_num=tel, username=username, email=email)\n stu.set_password(str(password))\n db.session.add(stu)\n # stu = Student.query.get(id)\n # print(stu.username)\n db.session.commit()\n\n\nimport time, datetime\ndef randomtimes(start, end, frmt=\"%Y-%m-%d\"):\n stime = time.mktime(time.strptime(start, frmt))\n etime = time.mktime(time.strptime(end, frmt))\n\n ptime = stime + random.random() * (etime - stime)\n return time.strftime(frmt, time.localtime(ptime))\n\n\ndef add_contest(start, end):\n ids = range(start, end)\n password = 1\n types = ['科技', '人文', '体育', '理科', '综合']\n levels = ['校级', '市级', '省级', '国家级', '国际级']\n\n for id in ids:\n # major_types = random.choice(types)\n # tel = random.randint(13000000000,19000000000)\n # types = random.randint(0,1)\n # print(major_types, tel)\n # name = ''.join(random.sample(\n # ['z', 'y', 'x', 'w', 'v', 'u', 't', 's', 'r', 'q', 'p', 'o', 'n', 'm', 'l', 'k', 'j', 'i', 'h', 'g', 'f',\n # 'e', 'd', 'c', 'b', 'a'], 5))\n time = randomtimes('2017-01-01', '2020-01-01')\n print(time)\n type = random.choice(types)\n level = random.choice(levels)\n name = '竞赛' + str(id)\n detail = '第' + str(id) + '个竞赛'\n # email = str(id) + '@test.com'\n stu = Contest(contest_name=name, contest_type=type, contest_time=time, details=detail, level=level)\n # stu.set_password(str(password))\n db.session.add(stu)\n # stu = Student.query.get(id)\n # print(stu.username)\n db.session.commit()\n\n\ndef add_request(type,count):\n '''\n 批量添加竞赛申请\n :param type: 队伍有多少人\n :param count: 要添加多少条申请信息\n :return:\n '''\n\n for i in range(count):\n contest_id = random.randint(1, 29)\n teacher = random.randint(200, 220)\n times = randomtimes('2017-01-01', '2019-05-01')\n id1 = random.randint(101, 110)\n id2 = random.randint(111, 120)\n id3 = random.randint(121, 130)\n id4 = random.randint(131, 140)\n if type == 1:\n id2 = None\n id3 = None\n id4 = None\n elif type == 2:\n id3 = None\n id4 = None\n elif type == 3:\n id4 = None\n\n print(id1, id2, id3)\n if not id2:\n req = Request(user_id=id1, contest_id=contest_id, status=0, sup_teacher=teacher,\n add_time=datetime.datetime.now(), user_type=0)\n 
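# solo entry (user_type=0): stage the Request directly, no Team row is needed\n            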
db.session.add(req)\n else:\n team_name = ''.join(random.sample(\n ['z', 'y', 'x', 'w', 'v', 'u', 't', 's', 'r', 'q', 'p', 'o', 'n', 'm', 'l', 'k', 'j', 'i', 'h', 'g',\n 'f',\n 'e', 'd', 'c', 'b', 'a'], 5))\n team = Team(team_name=team_name)\n\n # student = Student.query.filter_by(user_id=)\n id = id1\n if id:\n team.parts.append(Student.query.get(id))\n id = id2\n if id:\n team.parts.append(Student.query.get(id))\n id = id3\n if id:\n team.parts.append(Student.query.get(id))\n id = id4\n if id:\n team.parts.append(Student.query.get(id))\n # id = id5\n # if id:\n # team.parts.append(Student.query.get(id))\n db.session.add(team)\n # print(team.team_id)\n req = Request(user_id=team.team_id, contest_id=contest_id, status=0, sup_teacher=teacher,\n add_time=datetime.datetime.now(), user_type=1)\n db.session.add(req)\n db.session.commit()\n\n\ndef agree_request(start, end):\n for i in range(start, end):\n print(i)\n request_id = i\n req1 = Request.query.get(request_id)\n req1.status = 1\n award = Award(user_id=req1.user_id, user_type=req1.user_type, contest_id=req1.contest_id,\n sup_teacher=req1.sup_teacher)\n print(req1.user_id)\n db.session.add(award)\n # print(award.award_id)\n db.session.commit()\n\n\ndef award_in(start, end):\n ids = range(start, end)\n types = ['优秀奖', '一等奖', '二等奖', '三等奖', '无']\n for id in ids:\n type = random.choice(types)\n awd = Award.query.get(id)\n awd.grade = type\n db.session.commit()\n\n\n\nimport xlrd\n\ndef get_data(filename, sheetnum): # 获取企业列表,及对应的类型\n dir_case = 'app/file/' + filename + '.xlsx'\n data = xlrd.open_workbook(dir_case)\n table = data.sheets()[sheetnum] # 读取第一个工作簿\n nor = table.nrows # 获取总行数\n # nol = table.ncols\n # print(nor)\n dict = {}\n for i in range(nor):\n title = table.cell_value(i, 0)\n value = table.cell_value(i, 1)\n dict[title] = value\n return dict\n\n\ndef work_in(start, end):\n ids = range(start, end)\n company = get_data('list', 0)\n for id in ids:\n # print(list(company))\n name = random.choice(list(company.keys()))\n type = company[name]\n awd = Student.query.get(id)\n awd.company_name = name\n awd.company_type = type\n awd.salary = random.randrange(4000,12000,1000)\n db.session.commit()\n\n\nimport requests\nfrom lxml import etree\ndef get_university(): # 利用爬虫从研招网上获取学校信息,保存到表格中\n url = \"https://yz.chsi.com.cn/sch/?start={}\"\n\n lists = []\n for i in range(44):\n cur_url = url.format(i * 20)\n html = requests.get(cur_url).text\n xpath_parser = etree.HTML(html)\n univer = xpath_parser.xpath(\"//table[@class='ch-table']//tr/td[1]/a/text()\")\n for i in range(len(univer)):\n univer[i] = univer[i].strip()\n lists.append(univer[i])\n # print(lists)\n\n output = open('C:\\\\Users\\\\MRZhao\\\\Desktop\\\\data.xls', 'w', encoding='gbk')\n for i in range(len(lists)):\n # for j in range(len(list1[i])):\n output.write(str(lists[i])) # write函数不能写int类型的参数,所以使用str()转化\n # output.write('\\t') # 相当于Tab一下,换一个单元格\n output.write('\\n') # 写完一行立马换行\n output.close()\n\n return lists\n\n\ndef study_data(): # 获得所有有研究生招生的学校信息\n dir_case = 'app/file/' + 'study.xlsx'\n data = xlrd.open_workbook(dir_case)\n table = data.sheets()[0]\n nor = table.nrows\n # list_none = get_university()\n list_none, list_211, list_985 = [], [], []\n for i in range(nor):\n type_none = table.cell_value(i, 0)\n type_211 = table.cell_value(i, 1)\n type_985 = table.cell_value(i, 2)\n list_none.append(type_none)\n if type_211:\n list_211.append(type_211)\n if type_985:\n list_985.append(type_985)\n\n for item in list_none[::-1]: # 需要倒序删除,要不连续元素无法正确删除;获得普通学校列表\n if item in 
list_211:\n list_none.remove(item)\n\n for item in list_211[::-1]: # 需要倒序删除,要不连续元素无法正确删除;获得211高校列表\n if item in list_985:\n list_211.remove(item)\n\n dict1 = {}\n for item in list_none:\n dict1[item] = '普通高校'\n for item in list_211:\n dict1[item] = '211高校'\n for item in list_985:\n dict1[item] = '985高校'\n # print(dict1)\n return dict1\n\n\ndef study_in(start, end):\n ids = range(start, end)\n univer = study_data()\n for id in ids:\n name = random.choice(list(univer.keys()))\n type = univer[name]\n stu = Student.query.get(id)\n stu.college_name = name\n stu.college_type = type\n db.session.commit()\n\n\ndef add_notice(count):\n notice1 = Notice.query.order_by(Notice.id.desc()).first()\n start = int(notice1.id)\n\n for i in range(start, start+count):\n title = 'notice' + str(i)\n text = ''.join(random.sample(\n ['z', 'y', 'x', 'w', 'v', 'u', 't', 's', 'r', 'q', 'p', 'o', 'n', 'm', 'l', 'k', 'j', 'i', 'h', 'g',\n 'f', 'e', 'd', 'c', 'b', 'a', ' ', ','], 20))\n print(text)\n times = randomtimes('2017-01-01', '2019-05-01')\n notice = Notice(title=title,text=text,time=times)\n db.session.add(notice)\n db.session.commit()\n\n# study_in(140, 149)\n# add_notice(10)\n# add_contest(10,30)\n# add_teacher(200, 230)\n# edit_user(1, 230)\n# add_request(1, 50)\n# agree_request(75, 130)\n# award_in(52,106)\n# get_data('list', 0)\n\nfrom pyecharts import Scatter\n\n# v1 = [10, 10, 20, 30, 40, 50, 60]\n# v2 = [10, 10, 20, 30, 40, 50, 60]\n# extra = [1,2,1,1,1,1,5]\n# scatter = Scatter(\"散点图示例\")\n# scatter.add(\"scatter\",\n# v1,\n# v2,\n# extra_data=extra,\n# is_visualmap=True,\n# visual_dimension=2,\n# visual_orient=\"horizontal\",\n# visual_type=\"size\",\n# visual_range=[0, 10],\n# visual_text_color=\"#000\",)\n# scatter.render()\n# work_in(104,110)\n\n\n# data = [\n# [28604, 77, 17096869],\n# [31163, 77.4, 27662440],\n# [1516, 68, 1154605773],\n# [13670, 74.7, 10582082],\n# [28599, 75, 4986705],\n# [29476, 77.1, 56943299],\n# [31476, 75.4, 78958237],\n# [28666, 78.1, 254830],\n# [1777, 57.7, 870601776],\n# [29550, 79.1, 122249285],\n# [2076, 67.9, 20194354],\n# [12087, 72, 42972254],\n# [24021, 75.4, 3397534],\n# [43296, 76.8, 4240375],\n# [10088, 70.8, 38195258],\n# [19349, 69.6, 147568552],\n# [10670, 67.3, 53994605],\n# [26424, 75.7, 57110117],\n# [37062, 75.4, 252847810]\n# ]\n#\n# x_lst = [v[0] for v in data]\n# y_lst = [v[1] for v in data]\n# extra_data = [v[2] for v in data]\n# sc = Scatter()\n# sc.add(\n# \"scatter\",\n# x_lst,\n# y_lst,\n# extra_data=extra_data,\n# tooltip_formatter='个数{c}',\n# is_visualmap=True,\n# visual_dimension=2,\n# visual_orient=\"horizontal\",\n# visual_type=\"size\",\n# visual_range=[254830, 1154605773],\n# visual_text_color=\"#000\",\n# )\n# sc.render()\n\n\n# import numpy as np\n# from scipy.stats import pearsonr\n# # import random\n# #\n# x = [1, 5,2,0,4,2]\n# y = [4000,8000,3000,8000,6000,5000]\n# # y = ['211','958','211','普通','211','211']\n# xnp = np.array(x)\n# ynp = np.array(y)\n# print(pearsonr(x,y)[0])\n#\n# result = {0: 1.1181753789488595, 1: 0.5566080288678394, 2: 0.4718269778030734, 3: 0.48716683119447185, 4: 1.0, 5: 0.1395076201641266, 6: 0.20941558441558442}\n#\n# x,y = [],[]\n# for key,value in result.items():\n# x.append(key)\n# y.append(value)\n# xnp = np.array(x)\n# ynp = np.array(y)\n# print(pearsonr(x,y))\n# names = ['id','data']\n# formats = ['f8','f8']\n# dtype = dict(names = names, formats=formats)\n# array = np.array(result.items(), dtype=dtype)\n# print(repr(array))\n# np.random.seed(0)\n# size=300\n# x=np.random.normal(0,1,size)\n# 
print(\"Lower noise\",pearsonr(x,x+np.random.normal(0,1,size)))\n# print(\"Higher noise\",pearsonr(x,x+np.random.normal(0,10,size)))\n# from sqlalchemy import func\n# # students = Award.query().filter(Award.user_type==0).group_by(Award.user_id).all()\n# # students1 = Award.query().filter(Award.user_type==0).group_by(Award.user_id).count()\n# ss = db.session.query(Award.user_id, func.count(Award.user_id)).filter(Award.user_type==0).group_by(Award.user_id).all()\n# dict1 = {}\n# for s in ss:\n# print(s[0],s[1])\n# dict1[s[0]] = s[1]\n# # count1 = Award.query.join( # 选出每一类的参赛人数\n# # Contest, (Award.contest_id == Contest.contest_id)).filter(\n# # Contest.contest_type == types[0], Contest.contest_time >= start, Contest.contest_time <= end).count()\n# ss1 = db.session.query(Award.user_id, team_student.c.user_id, func.count(team_student.c.user_id)).\\\n# join(team_student, (team_student.c.team_id == Award.user_id)).\\\n# filter(Award.user_type==1).group_by(team_student.c.user_id).all()\n#\n# print(ss1)\n# dict2 = {}\n# for s in ss1:\n# # print(s[1],':',s[2])\n# dict2[s[1]] = s[2]\n# for key, value in dict2.items():\n# if key in dict1:\n# dict1[key] += value\n# else:\n# dict1[key] = value\n# print(dict1)\n# print(str(ss))\n# for s1,s2 in zip(ss1,ss2):\n# print(':',s2)\n # dict1[s[0]] = s[1]\n# print(dict1)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":18770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"241009950","text":"import re, logging\nimport pytz\nfrom django.conf import settings\nimport cv2\n\nfrom .database_wrapper import *\nfrom .communication_utils import *\nfrom .serialization import *\n\n# This file represents the backend File Manager.\n\nVIDEO_FORMATS = [\"mkv\", \"flv\", \"vob\", \"ogv\", \"ogg\",\n \"264\", \"263\", \"mjpeg\", \"avc\", \"m2ts\",\n \"mts\", \"avi\", \"mov\", \"qt\", \"wmv\", \"mp4\",\n \"m4p\", \"m4v\", \"mpg\", \"mp2\", \"mpeg\",\n \"mpe\", \"mpv\", \"m2v\", \"m4v\", \"3gp\", \"3g2\",\n \"flv\", \"f4v\", \"f4p\", \"f4a\", \"f4b\", \"webm\"]\n\n\ndef get_folders(data: dict) -> (int, dict):\n \"\"\"\n Get all folders in a project.\n\n :param data: Project id.\n :return: Status code, all folders in project in JSON.\n \"\"\"\n try:\n pid = data[PROJECT_ID]\n except KeyError:\n return 400, {} # Bad request\n\n try:\n root_folders = get_folders_in_project(pid=pid)\n except AssertionError:\n return 204, {} # No content\n\n folders = root_folders[::1]\n for f in root_folders:\n folders += get_subfolders_recursive(fid=f.id)\n\n return 200, os_aware({FOLDERS: serialize(folders)})\n\n\ndef add_folder(data: dict) -> (int, dict):\n \"\"\"\n Adds a folder to a project.\n\n :param data: Project id and (absolute) file path to folder.\n :return: Status code, id of given folder.\n \"\"\"\n try:\n pid = data[PROJECT_ID]\n file_path = data[FILE_PATH]\n except KeyError:\n return 400, {} # Bad request\n\n try:\n path, name = split_file_path(file_path=file_path)\n except ValueError:\n return 400, {} # Bad request\n\n fid = create_root_folder(path=path, name=name)\n\n try:\n add_folder_to_project(fid=fid, pid=pid)\n except AssertionError:\n return 204, {} # No content\n\n return 200, os_aware({FOLDER_ID: fid})\n\n\ndef build_file_structure(file_path: str) -> None:\n \"\"\"\n Traverses the user's file system from the given folder downwards while adding all folder and clips to the database.\n\n :param file_path: Absolute path to folder in file system.\n \"\"\"\n # Divide folder path in name and 
path.\n path, name = split_file_path(file_path=file_path)\n\n # Create root and set parent id.\n parent_id = create_root_folder(path=path, name=name)\n\n # Traverse all subfolders and add the to the database.\n traverse_subfolders(path=file_path, parent_id=parent_id)\n\n\ndef traverse_subfolders(path: str, parent_id: int) -> None:\n \"\"\"\n Recursive helper to build_file_structure.\n\n :param path: The absolute path to a folder.\n :param parent_id: The parent folder's id.\n \"\"\"\n for entry in os.scandir(path): # Iterate over all entries in the folder.\n file_path = os.path.join(path, entry.name) # Save file path to current entry.\n if entry.is_dir():\n fid = create_subfolder(parent_fid=parent_id, name=entry.name)\n traverse_subfolders(path=file_path, parent_id=fid) # Traverse subfolders of entry.\n elif entry.is_file():\n is_clip, name, suffix = analyze_file(entry.name)\n if is_clip:\n try:\n create_clip(**get_clip_info(file_path=file_path, folder_id=parent_id, name=name,\n video_format=suffix))\n except ValueError:\n logging.info(msg=\"No metadata found for: \" + file_path)\n\n\ndef analyze_file(file: str) -> (bool, str, str):\n \"\"\"\n Analyzes a file.\n Decides if it is a clip.\n Finds name and suffix.\n\n :param file: File (str) on the form filename.suffix.\n :return: Tuple with three elements: (is clip, name of file, format of file).\n \"\"\"\n split = file.rsplit(sep='.', maxsplit=1)\n if len(split) != 2:\n raise ValueError(\"Given file is not valid\")\n name = split[0]\n suffix = split[1]\n is_clip = suffix in VIDEO_FORMATS\n return is_clip, name, suffix\n\n\ndef get_clip_info(file_path: str, folder_id: int, name: str, video_format: str) -> dict:\n \"\"\"\n Finds all information related to the clip and returns a dictionary that can be used as input to the\n function create_clip in the database wrapper.\n\n :param file_path: The absolute path to a clip.\n :param folder_id: The clip's parent folder's id.\n :param name: The name of the clip.\n :param video_format: The video format of the clip.\n :return: A dictionary with the valid parameters for create_clip in database_wrapper.py.\n \"\"\"\n latitude, longitude, start_time = parse_metadata(file_path=file_path)\n duration, frame_rate, width, height = get_clip_details(file_path=file_path)\n end_time = start_time + timezone.timedelta(seconds=duration)\n return {'fid': folder_id, 'name': name, 'video_format': video_format, 'start_time': start_time,\n 'end_time': end_time, 'latitude': latitude, 'longitude': longitude, 'width': width, 'height': height,\n 'frame_rate': frame_rate}\n\n\ndef parse_metadata(file_path: str) -> (Decimal, Decimal, timezone.datetime):\n \"\"\"\n Parses a clip's metadata for location and start time.\n\n Metadata has the same name as the clip but with .txt as an extra suffix.\n\n Example of metadata:\n 59°23'19.2\"N 17°55'35.4\"E (59.388668, 17.926501)\n 2018-09-06 15:45:59.603 (2018-09-06 15:45:59)\n\n :param file_path: The absolute path to a clip.\n :return: (latitude: Decimal, longitude, Decimal: datetime.datetime)\n \"\"\"\n wrong_format_error = ValueError(\"Metadata has the wrong format.\")\n\n # Read metadata from file.\n with open(file=file_path + '.txt', mode='r') as f:\n content = f.read()\n\n # Find both parentheses from metadata.\n parentheses = re.findall('\\(.*?\\)', content)\n if len(parentheses) != 2:\n raise wrong_format_error\n\n # Extract latitude and longitude from location parentheses.\n location = re.split(string=parentheses[0][1:-1], pattern=', ')\n if len(location) != 2:\n raise 
wrong_format_error\n try:\n lat = Decimal(location[0])\n lon = Decimal(location[1])\n except SyntaxError:\n raise wrong_format_error\n\n # Extract start time from time parentheses.\n try:\n start_time = timezone.datetime.strptime(parentheses[1][1:-1], '%Y-%m-%d %H:%M:%S')\n start_time = start_time.replace(tzinfo=pytz.timezone(settings.TIME_ZONE)) # make timezone aware\n except ValueError:\n raise wrong_format_error\n\n return lat, lon, start_time\n\n\ndef get_clip_details(file_path: str) -> (int, float, int, int):\n \"\"\"\n Gets a clip's duration, frame rate and dimensions (width, height).\n\n :param file_path: The absolute path to a clip.\n :return: Duration in seconds, frame rate in FPS and width and height in pixels.\n This is given in the form of a tuple (duration, frame rate, width, height).\n \"\"\"\n # Check if clip exists\n if not os.path.isfile(path=file_path):\n raise FileNotFoundError\n\n cap = cv2.VideoCapture(file_path)\n fps = cap.get(cv2.CAP_PROP_FPS)\n frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n return int(frames/fps), fps, width, height\n\n\ndef split_file_path(file_path: str) -> (str, str):\n \"\"\"\n Splits a file path for path and name.\n\n E.g. home/user/folder -> (home/user/, folder)\n\n :param file_path: A file path.\n :return: The path and the name in the given file path as a tuple.\n \"\"\"\n split = file_path.rsplit(sep=os.path.sep, maxsplit=1)\n if len(split) != 2:\n raise ValueError(\"Given file path is not valid.\")\n path = os.path.join(split[0], '') # Add delimiter to path\n name = split[-1]\n\n return path, name\n","sub_path":"backend/file_manager.py","file_name":"file_manager.py","file_ext":"py","file_size_in_byte":7708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"542364872","text":"from __future__ import division\nimport logging\n\nimport psutil\nimport numpy\nfrom collections import namedtuple\nfrom time import sleep\nimport platform\n\nlogger = logging.getLogger(__name__)\n\n\ndef sysResourceTuple(fieldsList):\n return namedtuple(\"SystemResource\", fieldsList)\n\n\nclass CaptureSysResource(object):\n \"\"\"\n A thread based object, constructor requires a param interval\n Periodically captures resource usage and serialize data into a pickle file\n \"\"\"\n\n def __init__(self, interval):\n self._interval = interval\n self._OSPlatform = platform.system()\n\n def getOverallCPUPercentage(self):\n \"\"\"\n Get overall CPU usage out of 100\n :return: CPU percentage\n \"\"\"\n # Blocking\n return psutil.cpu_percent()\n\n def getPerCPUPercentage(self):\n \"\"\"\n Get each CPU usage out of 100\n :return: A list of all CPU percentage\n \"\"\"\n # non-blocking\n return psutil.cpu_percent(\n percpu=True\n )\n\n def getMemStats(self):\n memStats = psutil.virtual_memory()\n (memAvail, memUsedPerc) = (\n memStats.available,\n memStats.percent\n )\n\n if ('Linux' in self._OSPlatform):\n memBuffered = memStats.buffers\n return (memAvail, memUsedPerc, memBuffered)\n\n return (memAvail, memUsedPerc)\n\n def getSysResourceUsage(self, per_disk, per_nic):\n \"\"\"\n To return Overall cpu usage, per CPU usage which are out of 100%.\n\n :param per_disk: Display usage per disk\n :param per_nic: Return usage per network interface card\n\n :return: A dict contains System HW Resource Usage\n \"\"\"\n\n netIOPre = psutil.net_io_counters(per_nic)\n\n diskIOPre = psutil.disk_io_counters(per_disk)\n\n # discard the first 
set of perCPU usage data\n self.getOverallCPUPercentage()\n self.getPerCPUPercentage()\n # blocking for {interval}\n sleep(self._interval)\n\n # get overall CPU since last call\n CPUPercent = self.getOverallCPUPercentage()\n # get perCPU since last call\n perCPUPercent = self.getPerCPUPercentage()\n\n # define namedtuples\n ntDisk = namedtuple('ntDisk', 'read_count, write_count, read_bytes, write_bytes, read_time, '\n 'write_time, busy_time, read_merged_count, write_merged_count')\n ntNIC = namedtuple('ntNIC', 'bytes_sent, bytes_recv, packets_sent, packets_recv, errin, errout, dropin, dropout')\n\n # calculate the difference within this interval\n netIO = psutil.net_io_counters(per_nic)\n if type(netIO) == dict and type(netIOPre) == dict:\n assert(netIO.keys() == netIOPre.keys()), \"NICs differ during interval\"\n for name, tup in netIO.items():\n if name.upper() == \"LO\":\n continue\n tup = ntNIC(*tuple(numpy.subtract(tup, netIOPre[name])))\n else:\n netIO = ntNIC(*tuple(numpy.subtract(netIO, netIOPre)))\n\n diskIO = psutil.disk_io_counters(per_disk)\n if type(diskIO) == dict and type(diskIOPre) == dict:\n assert (diskIO.keys() == diskIOPre.keys()), \"disks differ during interval\"\n for name, tup in diskIO.items():\n diskIO[name] = ntDisk(*tuple(numpy.subtract(tup, diskIOPre[name])))\n else:\n diskIO = ntDisk(*tuple(numpy.subtract(diskIO, diskIOPre)))\n\n if ('Linux' in self._OSPlatform):\n (memAvailBytes, memUsedPercentage,\n memBufferedBytes) = self.getMemStats()\n else:\n (memAvailBytes, memUsedPercentage) = self.getMemStats()\n\n\n retDict = dict()\n retDict['OVERALL CPU USED PERCENTAGE'] = CPUPercent\n for itr, value in enumerate(perCPUPercent):\n retDict['CPU%d USED PERCENTAGE' % itr] = perCPUPercent[itr]\n\n retDict['OVERALL MEMORY AVAILABLE BYTES'] = memAvailBytes\n retDict['OVERALL MEMORY USED PERCENTAGE'] = memUsedPercentage\n if ('Linux' in self._OSPlatform):\n retDict['OVERALL MEMORY BUFFERED BYTES'] = memBufferedBytes\n\n if per_disk:\n for name, tup in diskIO.items():\n retDict[name.upper() + ' READ OPS'] = tup.read_count\n retDict[name.upper() + ' WRITE OPS'] = tup.write_count\n retDict[name.upper() + ' READ BYTES'] = tup.read_bytes\n retDict[name.upper() + ' WRITE BYTES'] = tup.write_bytes\n else:\n retDict['OVERALL DISK READ OPS'] = diskIO.read_count\n retDict['OVERALL DISK WRITE OPS'] = diskIO.write_count\n retDict['OVERALL DISK READ BYTES'] = diskIO.read_bytes\n retDict['OVERALL DISK WRITE BYTES'] = diskIO.write_bytes\n\n if per_nic:\n for name, tup in netIO.items():\n retDict[name.upper() + ' SEND BYTES'] = tup.bytes_sent\n retDict[name.upper() + ' RECV BYTES'] = tup.bytes_recv\n retDict[name.upper() + ' INCOMING PACKETS DROPPED'] = tup.dropin\n retDict[name.upper() + ' OUTGOING PACKETS DROPPED'] = tup.dropout\n else:\n retDict['OVERALL NETWORK SEND BYTES'] = netIO.bytes_sent\n retDict['OVERALL NETWORK RECV BYTES'] = netIO.bytes_recv\n retDict['OVERALL INCOMING PACKETS DROPPED'] = netIO.dropin\n retDict['OVERALL OUTGOING PACKETS DROPPED'] = netIO.dropout\n\n return retDict\n","sub_path":"NGLMClient/Modules/CaptureSysResource.py","file_name":"CaptureSysResource.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"22030397","text":"from flask import Flask\nfrom python_dvd.app import api_bp\n\napp = Flask(__name__)\napp.config.from_object('config')\n\napp.register_blueprint(api_bp, url_prefix='/api')\n\nfrom database import db\ndb.init_app(app)\n\n@app.route('/')\ndef 
hello():\n return \"Hello World!\"\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"183758936","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch.nn.init as init\nfrom torch.autograd import Variable\n\nif torch.cuda.is_available():\n T = torch.cuda\nelse:\n T = torch\n\ndef init_normal(layer):\n if type(layer) in [nn.Conv2d, nn.ConvTranspose2d]:\n # print(layer)\n init.normal_(layer.weight.data, 0, 0.02)\n elif type(layer) in [nn.BatchNorm2d]:\n init.normal_(layer.weight.data, 1.0, 0.02)\n init.constant_(layer.bias.data, 0.0)\n\nclass Noise(nn.Module):\n def __init__(self, use_noise, sigma=0.2):\n super(Noise, self).__init__()\n self.use_noise = use_noise\n self.sigma = sigma\n\n def forward(self, x):\n if self.use_noise:\n return x + self.sigma * Variable(T.FloatTensor(x.size()).normal_(), requires_grad=False)\n return x\n\nclass DepthVideoGenerator(nn.Module):\n def __init__(self, dim_z_content, dim_z_motion, video_length, ngf=64):\n super(DepthVideoGenerator, self).__init__()\n\n self.dim_z_content = dim_z_content\n self.dim_z_motion = dim_z_motion\n self.video_length = video_length\n\n dim_z = dim_z_motion + dim_z_content\n self.dim_z = dim_z\n\n self.recurrent = nn.GRUCell(dim_z_motion, dim_z_motion)\n\n self.main = nn.Sequential(\n nn.ConvTranspose2d(dim_z, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(ngf, 1, 4, 2, 1, bias=False),\n nn.Tanh()\n )\n\n self.apply(init_normal)\n\n def sample_z_m(self, batchsize):\n h_t = [self.get_gru_initial_state(batchsize)]\n for frame_num in range(self.video_length):\n e_t = self.get_iteration_noise(batchsize)\n h_t.append(self.recurrent(e_t, h_t[-1]))\n \n # (batchsize, dim_z_motion*self.video_length)\n z_m = torch.stack(h_t[1:], 1)\n # (batchsize*self.video_length, dim_z_motion)\n z_m = z_m.view(batchsize*self.video_length, -1)\n\n return z_m\n\n def sample_z_content(self, batchsize):\n content = Variable(T.FloatTensor(batchsize, self.dim_z_content).normal_())\n content = content.repeat(1, self.video_length)\\\n .view(batchsize*self.video_length, -1) # same operation as np.repeat\n return content\n\n def sample_z_video(self, batchsize):\n z_content = self.sample_z_content(batchsize)\n z_motion = self.sample_z_m(batchsize)\n \n z = torch.cat([z_content, z_motion], dim=1)\n\n return z\n\n def sample_videos(self, batchsize):\n z = self.sample_z_video(batchsize)\n\n h = self.main(z.view(batchsize*self.video_length, self.dim_z, 1, 1))\n h = h.view(batchsize, self.video_length, 1, 64, 64)\n\n h = h.permute(0, 2, 1, 3, 4)\n\n return h\n\n def get_gru_initial_state(self, batchsize):\n return Variable(T.FloatTensor(batchsize, self.dim_z_motion).normal_())\n\n def get_iteration_noise(self, batchsize):\n return Variable(T.FloatTensor(batchsize, self.dim_z_motion).normal_())\n\n def forward_dummy(self):\n return self.sample_videos(2)\n\nclass Inconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n 
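# stem block: a stride-1 3x3 convolution that widens in_ch -> out_ch without changing spatial size\n        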
super(Inconv, self).__init__()\n\n self.main = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, kernel_size=3,\n stride=1, padding=1, bias=False),\n nn.LeakyReLU(inplace=True),\n )\n\n def forward(self, x):\n x = self.main(x)\n\n return x\n\nclass DownBlock(nn.Module):\n def __init__(self, in_ch, out_ch, dropout=False):\n super(DownBlock, self).__init__()\n \n layers = [\n nn.Conv2d(in_ch, out_ch, kernel_size=4,\n stride=2, padding=1, bias=False),\n nn.BatchNorm2d(out_ch),\n nn.LeakyReLU(0.2, inplace=True),\n ]\n\n if dropout:\n # between batchnorm and activation\n layers.insert(2, nn.Dropout2d(0.5, inplace=True))\n\n self.main = nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.main(x)\n\n return x\n\nclass UpBlock(nn.Module):\n def __init__(self, in_ch, out_ch, dropout=False):\n super(UpBlock, self).__init__()\n\n layers = [\n nn.ConvTranspose2d(in_ch, out_ch, kernel_size=4,\n stride=2, padding=1, bias=False),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n ]\n\n if dropout:\n # between batchnorm and activation\n layers.insert(2, nn.Dropout2d(0.5, inplace=True))\n\n self.main = nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.main(x)\n\n return x\n\nclass Outconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(Outconv, self).__init__()\n\n self.main = nn.Sequential(\n nn.ConvTranspose2d(in_ch, out_ch, kernel_size=3,\n stride=1, padding=1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, x):\n x = self.main(x)\n\n return x\n\nclass ColorVideoGenerator(nn.Module):\n def __init__(self, dim_z, ngf=64):\n super(ColorVideoGenerator, self).__init__()\n\n self.dim_z = dim_z\n \n self.inconv = Inconv(1, ngf*1)\n self.down_blocks = nn.ModuleList([\n DownBlock(ngf*1, ngf*1),\n DownBlock(ngf*1, ngf*2),\n DownBlock(ngf*2, ngf*4),\n DownBlock(ngf*4, ngf*4),\n DownBlock(ngf*4, ngf*4),\n DownBlock(ngf*4, ngf*4),\n ])\n\n self.up_blocks = nn.ModuleList([\n UpBlock(ngf*4+dim_z, ngf*4, dropout=True),\n UpBlock(ngf*8 , ngf*4, dropout=True),\n UpBlock(ngf*8 , ngf*4),\n UpBlock(ngf*8 , ngf*2),\n UpBlock(ngf*4 , ngf*1),\n UpBlock(ngf*2 , ngf*1),\n ])\n\n self.outconv = Outconv(ngf*2, 3)\n\n self.n_down_blocks = len(self.down_blocks)\n self.n_up_blocks = len(self.up_blocks)\n\n self.apply(init_normal)\n\n def make_hidden(self, batchsize):\n z = T.FloatTensor(1, self.dim_z).normal_()\n z = z.repeat(batchsize, 1) # (B, dim_z)\n z = z.unsqueeze(-1).unsqueeze(-1) # (B, dim_z, 1, 1)\n\n return Variable(z)\n\n def forward(self, x, z):\n # video to images\n B, C, H, W = x.shape\n \n # down\n hs = [self.inconv(x)]\n for i in range(self.n_down_blocks):\n hs.append(self.down_blocks[i](hs[-1]))\n \n # concat latent variable\n h = torch.cat([hs[-1], z], 1)\n\n # up\n h = self.up_blocks[0](h)\n for i in range(1, self.n_up_blocks):\n h = torch.cat([h, hs[-i-1]], 1)\n h = self.up_blocks[i](h)\n h = self.outconv(torch.cat([h, hs[0]], 1))\n\n return h\n\n def forward_videos(self, xs):\n B, C, T, H, W = xs.shape\n zs = self.make_hidden(B) # (B, C, 1, 1)\n zs = zs.unsqueeze(1).repeat(1,T,1,1,1) # (B, T, C, 1, 1)\n zs = zs.view(B*T, -1, 1, 1)\n\n xs = xs.permute(0, 2, 1, 3, 4) # (B, T, C, H, W)\n xs = xs.view(B*T, C, H, W)\n ys = self(xs, zs)\n ys = ys.view(B, T, 3, H, W)\n ys = ys.permute(0, 2, 1, 3, 4) # (B, C, T, H, W)\n\n return ys\n\n def forward_dummy(self):\n shape = (2, 1, 64, 64)\n z = self.make_hidden(2)\n x = Variable(T.FloatTensor(*shape).normal_())\n\n return self(x, z)\n\n\nclass ImageDiscriminator(nn.Module):\n def __init__(self, use_noise=False, noise_sigma=None, ndf=64):\n 
super(ImageDiscriminator, self).__init__()\n\n self.use_noise = use_noise\n\n self.conv_c = nn.Sequential(\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(3, ndf//2, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.conv_d = nn.Sequential(\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(1, ndf//2, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.main = nn.Sequential(\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(ndf * 4, 1, 4, 2, 1, bias=False),\n )\n\n def forward(self, x):\n hc = self.conv_c(x[:,0:3])\n hd = self.conv_d(x[:,3:4])\n h = torch.cat([hc, hd], 1)\n h = self.main(h).squeeze()\n\n return h\n\n def forward_dummy(self):\n shape = (2, 4, 64, 64)\n x = Variable(T.FloatTensor(*shape).normal_())\n\n return self(x)\n\n\nclass VideoDiscriminator(nn.Module):\n def __init__(self, use_noise=False, noise_sigma=None, ndf=64):\n super(VideoDiscriminator, self).__init__()\n\n self.use_noise = use_noise\n\n self.conv_c = nn.Sequential(\n nn.Conv3d(3, ndf//2, 4, stride=(1,2,2), padding=(0,1,1), bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.conv_d = nn.Sequential(\n nn.Conv3d(1, ndf//2, 4, stride=(1,2,2), padding=(0,1,1), bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.main = nn.Sequential(\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv3d(ndf, ndf * 2, 4, stride=(1,2,2), padding=(0,1,1), bias=False),\n nn.BatchNorm3d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv3d(ndf * 2, ndf * 4, 4, stride=(1,2,2), padding=(0,1,1), bias=False),\n nn.BatchNorm3d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv3d(ndf * 4, 1, 4, stride=(1,2,2), padding=(0,1,1), bias=False),\n )\n\n def forward(self, x):\n hc = self.conv_c(x[:,0:3])\n hd = self.conv_d(x[:,3:4])\n h = torch.cat([hc, hd], 1)\n h = self.main(h).squeeze()\n\n return h\n\n def forward_dummy(self):\n shape = (2, 4, 16, 64, 64)\n x = Variable(T.FloatTensor(*shape).normal_())\n\n return self(x)\n\nif __name__==\"__main__\":\n print(dict(ColorVideoGenerator(10).named_parameters()).keys())\n # print(DepthVideoGenerator(30,10,16).forward_dummy().shape)\n # print(ColorVideoGenerator(10).forward_dummy().shape)\n # print(ImageDiscriminator().forward_dummy().shape)\n # print(VideoDiscriminator().forward_dummy().shape)\n","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"189331970","text":"# B - Red or Blue\nimport sys\ninput = sys.stdin.readline\nn = int(input())\nhuts = input()\nr = 0\nb = 0\nfor i in range(n):\n\tif huts[i] == \"R\":\n\t\tr += 1\n\telse:\n\t\tb += 1\nif r > b:\n\tprint(\"Yes\")\nelse:\n\tprint(\"No\")","sub_path":"Python_codes/p03080/s635035528.py","file_name":"s635035528.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"358823051","text":"\"\"\"\n\nSokoban Game\nThis program takes a text file of a Sokoban map,\nutilizes sokoban_map.py to create an instance of SokobanMap,\nand solves the map through either uniform cost 
search\nor a* algorithm.\n\nAghnia Prawira - 45610240\nBagus S. Baskoro - 45445602\n\nCOMP3702 2019\n\n\"\"\"\n\n# COMP3702 2019 Assignment 1 Support Code by njc\nfrom sokoban_map import SokobanMap\n\nimport copy\nimport sys\nimport queue\nimport time\n\n# start timer\nstart = time.time()\n\n# input file symbols\nOBSTACLE_SYMBOL = '#'\n\nclass SokobanState:\n \"\"\"\n Class that stores a game state.\n\n Player position and box positions are stored specific to each\n state, but an instance of SokobanMap (that stores obstacles and\n target positions) is only passed since obstacles\n and target positions stay constant throughout the game.\n \"\"\"\n\n def __init__(self, player_position,\n box_positions, deadlock, direction, sokoban_map):\n self.player_position = copy.deepcopy(player_position)\n self.player_x = self.player_position[1]\n self.player_y = self.player_position[0]\n self.box_positions = copy.deepcopy(box_positions)\n self.direction = direction\n self.sokoban_map = sokoban_map\n self.deadlock = deadlock\n\n def __members(self):\n \"\"\"\n Creates a tuple of player position and box positions\n for creating hash.\n\n :return: tuple of player position and box positions\n \"\"\"\n return (tuple(self.player_position), tuple(self.box_positions))\n\n def __hash__(self):\n \"\"\"\n Modifies default hash function.\n A state is considered equivalent when player position\n and box positions are the same.\n \"\"\"\n return hash(self.__members())\n\n def __eq__(self, other):\n \"\"\"\n Modifies default eq function.\n A state is considered equivalent when player position\n and box positions are the same.\n \"\"\"\n if type(other) is type(self):\n return self.__members() == other.__members()\n else:\n return False\n\n def is_goal(self):\n \"\"\"\n Determine whether a state is a goal state.\n Goal state is defined as a state where all boxes are in target positions.\n\n :return: True when state is a goal state, False when not.\n \"\"\"\n is_goal = True\n for i in self.box_positions:\n if i not in self.sokoban_map.tgt_positions:\n is_goal = False\n return is_goal\n\n def get_successors(self):\n \"\"\"\n Get all successors to a state.\n Successors are states that can be reached from a state through one move\n (either left, right, up, or down.)\n A state is reachable if there are no obstacles or other boxes on the way.\n\n :return: List of all reachable states.\n \"\"\"\n successors = []\n\n #LEFT\n # checks if there is an obstacle to the left of the player\n if self.sokoban_map.obstacle_map[self.player_y][self.player_x - 1] != OBSTACLE_SYMBOL:\n # change player position\n new_box_positions = copy.deepcopy(self.box_positions)\n new_x = self.player_x - 1\n new_y = self.player_y\n\n # checks if there is a box on player's position\n if (new_y, new_x) in self.box_positions:\n # checks if there is an obstacle, another box, or a deadlocked square\n # to left of the box\n if self.sokoban_map.obstacle_map[new_y][new_x - 1] != OBSTACLE_SYMBOL and (\n new_y, new_x - 1) not in self.box_positions and (\n new_y, new_x - 1) not in self.deadlock:\n\n new_box_x = new_x - 1\n new_box_y = new_y\n\n # change box position\n new_box_positions.remove((new_y, new_x))\n new_box_positions.append((new_box_y, new_box_x))\n\n # create new state and append to list of successors\n new_state = SokobanState([new_y, new_x], new_box_positions, self.deadlock, \"l\", self.sokoban_map)\n if not frozen_deadlock(new_state):\n successors.append(new_state)\n else:\n new_state = SokobanState([new_y, new_x], new_box_positions, self.deadlock, 
\"l\", self.sokoban_map)\n successors.append(new_state)\n\n #RIGHT\n if self.sokoban_map.obstacle_map[self.player_y][self.player_x + 1] != OBSTACLE_SYMBOL:\n new_box_positions = copy.deepcopy(self.box_positions)\n new_x = self.player_x + 1\n new_y = self.player_y\n\n if (new_y, new_x) in self.box_positions:\n if self.sokoban_map.obstacle_map[new_y][new_x + 1] != OBSTACLE_SYMBOL and (\n new_y, new_x + 1) not in self.box_positions and (\n new_y, new_x + 1) not in self.deadlock:\n new_box_x = new_x + 1\n new_box_y = new_y\n\n new_box_positions.remove((new_y, new_x))\n new_box_positions.append((new_box_y, new_box_x))\n\n new_state = SokobanState([new_y, new_x], new_box_positions, self.deadlock, \"r\", self.sokoban_map)\n if not frozen_deadlock(new_state):\n successors.append(new_state)\n\n else:\n new_state = SokobanState([new_y, new_x], new_box_positions, self.deadlock, \"r\", self.sokoban_map)\n successors.append(new_state)\n\n #UP\n if self.sokoban_map.obstacle_map[self.player_y - 1][self.player_x] != OBSTACLE_SYMBOL:\n new_box_positions = copy.deepcopy(self.box_positions)\n new_x = self.player_x\n new_y = self.player_y - 1\n\n if (new_y, new_x) in self.box_positions:\n if self.sokoban_map.obstacle_map[new_y - 1][new_x] != OBSTACLE_SYMBOL and (\n new_y - 1, new_x) not in self.box_positions and (\n new_y - 1, new_x) not in self.deadlock:\n new_box_x = new_x\n new_box_y = new_y - 1\n\n new_box_positions.remove((new_y, new_x))\n new_box_positions.append((new_box_y, new_box_x))\n\n new_state = SokobanState([new_y, new_x], new_box_positions, self.deadlock, \"u\", self.sokoban_map)\n if not frozen_deadlock(new_state):\n successors.append(new_state)\n\n else:\n new_state = SokobanState([new_y, new_x], new_box_positions, self.deadlock, \"u\", self.sokoban_map)\n successors.append(new_state)\n\n #DOWN\n if self.sokoban_map.obstacle_map[self.player_y + 1][self.player_x] != OBSTACLE_SYMBOL:\n new_box_positions = copy.deepcopy(self.box_positions)\n new_x = self.player_x\n new_y = self.player_y + 1\n\n if (new_y, new_x) in self.box_positions:\n if self.sokoban_map.obstacle_map[new_y + 1][new_x] != OBSTACLE_SYMBOL and (\n new_y + 1, new_x) not in self.box_positions and (\n new_y + 1, new_x) not in self.deadlock:\n new_box_x = new_x\n new_box_y = new_y + 1\n\n new_box_positions.remove((new_y, new_x))\n new_box_positions.append((new_box_y, new_box_x))\n\n new_state = SokobanState([new_y, new_x], new_box_positions, self.deadlock, \"d\", self.sokoban_map)\n if not frozen_deadlock(new_state):\n successors.append(new_state)\n\n else:\n new_state = SokobanState([new_y, new_x], new_box_positions, self.deadlock, \"d\", self.sokoban_map)\n successors.append(new_state)\n\n return successors\n\nclass PriorityEntryUCS(object):\n \"\"\"\n Class that stores the cost to get to a state,\n the path to get to a state,\n and the state itself.\n\n Made to allow priority queue to sort priority by cost only.\n \"\"\"\n def __init__(self, priority, path, state):\n self.priority = priority\n self.path = path\n self.state = state\n\n def __lt__(self, other):\n \"\"\"\n Modifies default function. 
Compares object only by priority.\n The lower cost takes higher priority.\n \"\"\"\n return self.priority < other.priority\n\ndef ucs(initial):\n \"\"\"\n Implements uniform cost search algorithm to solve a Sokoban game.\n Solving the game means returning an optimal path to reach the goal state\n where all boxes are in the target positions.\n\n Explores states with lowest cost first.\n\n :param initial: initial Sokoban game state\n :return: path to solve game\n \"\"\"\n\n # set of visited (expanded) states\n visited = set()\n\n # number of generated states\n states_generated = 0\n\n # uses priority queue to expand states by priority\n priority_queue = queue.PriorityQueue()\n priority_queue.put(PriorityEntryUCS(0, [], initial))\n\n while priority_queue:\n priority_entry = priority_queue.get()\n cost = priority_entry.priority\n state = priority_entry.state\n path = priority_entry.path\n\n # to avoid revisiting equivalent states\n # (where box positions and player positions are the same)\n if state not in visited:\n visited.add(state)\n\n if state.is_goal():\n print()\n print('Generated states = ', states_generated)\n print('Fringe states = ', priority_queue.qsize())\n print('Explored states = ', len(visited))\n print('Time required = ', -start + time.time())\n print()\n print('Game solved in ' + str(len(path)) + \" steps!\")\n\n return path\n\n successors = state.get_successors()\n for i in successors:\n states_generated += 1\n if i not in visited:\n # cost of each step is considered to be 1 (one)\n total_cost = cost + 1\n\n # update path\n new_path = copy.deepcopy(path)\n new_path.append(i.direction)\n # print(total_cost)\n # print(new_path)\n priority_queue.put(PriorityEntryUCS(total_cost, new_path, i))\n\ndef heuristic(state):\n \"\"\"\n Calculates Manhattan distance between all boxes and their closest target.\n\n\n \"\"\"\n box_positions = state.box_positions\n tgt_positions = state.sokoban_map.tgt_positions\n\n heuristic = 0\n\n for i in box_positions:\n min_distance = float('inf')\n for j in tgt_positions:\n (x1, y1) = i\n (x2, y2) = j\n manhattan_distance = abs(x1 - x2) + abs(y1 - y2)\n if manhattan_distance <= min_distance:\n min_distance = manhattan_distance\n heuristic += min_distance\n\n return heuristic\n\nclass PriorityEntryAS(object):\n \"\"\"\n Class that stores the priority (based on calculated heuristic),\n the cost to get to a state,\n the path to get to a state,\n and the state itself.\n\n Made to allow priority queue to sort priority by cost only.\n \"\"\"\n def __init__(self, priority, cost, path, state):\n self.priority = priority\n self.cost = cost\n self.path = path\n self.state = state\n\n def __lt__(self, other):\n \"\"\"\n Modifies default function. 
Compares object only by priority.\n The lower cost takes higher priority.\n \"\"\"\n return self.priority < other.priority\n\n\ndef a_star(initial):\n \"\"\"\n Implements a* algorithm to solve a Sokoban game.\n Solving the game means returning an optimal path to reach the goal state\n where all boxes are in the target positions.\n\n Explores states with lowest cost first.\n Implementation is similar to UCS, the only difference being the\n total cost = cost of path from initial state + heuristic\n\n :param initial: initial Sokoban game state\n :return: path to solve game\n \"\"\"\n\n visited = set()\n states_generated = 0\n priority_queue = queue.PriorityQueue()\n priority_queue.put(PriorityEntryAS(0, 0, [], initial))\n\n while priority_queue:\n priority_entry = priority_queue.get()\n cost = priority_entry.cost\n state = priority_entry.state\n path = priority_entry.path\n\n if state not in visited:\n visited.add(state)\n\n if state.is_goal():\n print()\n print('Generated states = ', states_generated)\n print('Fringe states = ', priority_queue.qsize())\n print('Explored states = ', len(visited))\n print('Time required = ', -start + time.time())\n print()\n print('Game solved in ' + str(len(path)) + \" steps!\")\n\n return path\n\n successors = state.get_successors()\n\n for i in successors:\n states_generated += 1\n if i not in visited:\n new_cost = cost + 1\n h = heuristic(i)\n total_cost = new_cost + h\n new_path = copy.deepcopy(path)\n new_path.append(i.direction)\n # print(new_path)\n # print(total_cost)\n priority_queue.put(PriorityEntryAS(total_cost, new_cost, new_path, i))\n\n\ndef frozen_deadlock(state):\n box_positions = state.box_positions\n tgt_positions = state.sokoban_map.tgt_positions\n obstacle_map = state.sokoban_map.obstacle_map\n\n \"\"\"\n three boxes around an obstacle\n \"\"\"\n\n # def is_obstacle(y_pos, x_pos):\n # return obstacle_map[y_pos][x_pos] == OBSTACLE_SYMBOL\n #\n # if len(box_positions) >= 3:\n # for i in box_positions:\n # # print(i)\n # \"\"\"i is in top left\"\"\"\n # # obstacle in bottom right\n # # check bottom left\n # if (i[0] + 1, i[1]) in box_positions:\n # # check top right\n # # print((i[0] + 1, i[1]))\n # if (i[0], i[1] - 1) in box_positions:\n # # check bottom right\n # if is_obstacle(i[0] + 1, i[1] - 1):\n # # if [i, (i[0] + 1, i[1]), (i[0], i[1] - 1)] not in tgt_positions:\n # print(box_positions)\n # print(\"where1\")\n # return True\n #\n # # obstacle in bottom left\n # # check bottom left\n # if is_obstacle(i[0] + 1, i[1]):\n # # check top right\n # # print((i[0] + 1, i[1]))\n # if (i[0], i[1] - 1) in box_positions:\n # # check bottom right\n # if (i[0] + 1, i[1] - 1) in box_positions:\n # # if [i, (i[0] + 1, i[1]), (i[0], i[1] - 1)] not in tgt_positions:\n # print(box_positions)\n # print(\"where2\")\n # return True\n #\n # # obstacle in top right\n # # check bottom left\n # if (i[0] + 1, i[1]) in box_positions:\n # # check top right\n # # print((i[0] + 1, i[1]))\n # if is_obstacle(i[0], i[1] - 1):\n # # check bottom right\n # if (i[0] + 1, i[1] - 1) in box_positions:\n # # if [i, (i[0] + 1, i[1]), (i[0], i[1] - 1)] not in tgt_positions:\n # print(box_positions)\n # print(\"where3\")\n # return True\n #\n # \"\"\"i is in top right\"\"\"\n # # obstacle in bottom left\n # # check bottom right\n # if (i[0] + 1, i[1]) in box_positions:\n # # check top left\n # # print((i[0] + 1, i[1]))\n # if (i[0], i[1] + 1) in box_positions:\n # # check bottom left\n # if is_obstacle(i[0] + 1, i[1] + 1):\n # # if [i, (i[0] + 1, i[1]), (i[0], i[1] - 1)] not in 
tgt_positions:\n # print(box_positions)\n # print(\"where4\")\n # return True\n #\n # # obstacle in bottom right\n # # check bottom right\n # if is_obstacle(i[0] + 1, i[1]):\n # # check top left\n # # print((i[0] + 1, i[1]))\n # if (i[0], i[1] + 1) in box_positions:\n # # check bottom left\n # if (i[0] + 1, i[1] + 1) in box_positions:\n # # if [i, (i[0] + 1, i[1]), (i[0], i[1] - 1)] not in tgt_positions:\n # print(box_positions)\n # print(\"where5\")\n # return True\n #\n # # obstacle in top left\n # # check bottom right\n # if (i[0] + 1, i[1]) in box_positions:\n # # check top left\n # # print((i[0] + 1, i[1]))\n # if is_obstacle(i[0], i[1] + 1):\n # # check bottom left\n # if (i[0] + 1, i[1] + 1) in box_positions:\n # # if [i, (i[0] + 1, i[1]), (i[0], i[1] - 1)] not in tgt_positions:\n # print(box_positions)\n # print(\"where6\")\n # return True\n\n\n # # i is in top right\n # # check bottom right\n # if (i[0] + 1, i[1]) in box_positions:\n # print((i[0] + 1, i[1]))\n # # check top left\n # if (i[0], i[1] + 1) in box_positions:\n # # check bottom left\n # if (i[0] + 1, i[1] + 1) in box_positions:\n # if [i, (i[0] + 1, i[1]), (i[0], i[1] + 1), (i[0] + 1, i[1] + 1)] not in tgt_positions:\n # print(box_positions)\n # print(\"where2\")\n # return True\n #\n # # i is in bottom right\n # # check top right\n # if (i[0] - 1, i[1]) in box_positions:\n # # check bottom left\n # print((i[0] - 1, i[1]))\n # if (i[0], i[1] + 1) in box_positions:\n # # check top left\n # if (i[0] - 1, i[1] + 1) in box_positions:\n # if [i, (i[0] - 1, i[1]), (i[0], i[1] + 1), (i[0] - 1, i[1] + 1)] not in tgt_positions:\n # print(box_positions)\n # print(\"where3\")\n # return True\n #\n # # i is in bottom left\n # # check top left\n # if (i[0] - 1, i[1]) in box_positions:\n # # check bottom right\n # print((i[0] - 1, i[1]))\n # if (i[0], i[1] - 1) in box_positions:\n # # check top right\n # if (i[0] - 1, i[1] - 1) in box_positions:\n # if [i, (i[0] - 1, i[1]), (i[0], i[1] - 1), (i[0] - 1, i[1] - 1)] not in tgt_positions:\n # print(box_positions)\n # print(\"where4\")\n # return True\n\n # check if position is blocked by wall along the x axis (above or below box)\n def x_along_wall(pos):\n y = pos[0]\n x = pos[1]\n\n # up\n if obstacle_map[y - 1][x] == OBSTACLE_SYMBOL:\n return True\n # down\n elif obstacle_map[y + 1][x] == OBSTACLE_SYMBOL:\n return True\n else:\n return False\n\n # check if position is blocked by wall along the y axis (left or right side of box)\n def y_along_wall(pos):\n y = pos[0]\n x = pos[1]\n\n # right\n if obstacle_map[y][x - 1] == OBSTACLE_SYMBOL:\n return True\n # left\n elif obstacle_map[y][x + 1] == OBSTACLE_SYMBOL:\n return True\n else:\n return False\n\n \"\"\"\n two boxes next to each other along a wall\n # B\t B #\t # #\t B B\t # B \t#\n # B\t B #\t B B\t # #\t B #\tBB\tetc.\n\t\t\t\t\t #\n \"\"\"\n if len(box_positions) > 1:\n for i in box_positions:\n if y_along_wall(i):\n if (i[0] - 1, i[1]) in box_positions and y_along_wall((i[0] - 1, i[1])):\n if i not in tgt_positions and (i[0] - 1, i[1]) not in tgt_positions:\n return True\n elif (i[0] + 1, i[1]) in box_positions and y_along_wall((i[0] + 1, i[1])):\n if i not in tgt_positions and (i[0] + 1, i[1]) not in tgt_positions:\n return True\n if x_along_wall(i):\n if (i[0], i[1] - 1) in box_positions and x_along_wall((i[0], i[1] - 1)):\n if i not in tgt_positions and (i[0], i[1] - 1) not in tgt_positions:\n return True\n elif (i[0], i[1] + 1) in box_positions and x_along_wall((i[0], i[1] + 1)):\n if i not in tgt_positions and (i[0], i[1] + 
1) not in tgt_positions:\n return True\n\n # \"\"\"\n # four boxes forming a sqaure\n # ##\n # ##\n # \"\"\"\n # if len(box_positions) == 4:\n # for i in box_positions:\n # if\n\n return False\n\n\ndef simple_deadlock(sokoban_map):\n tgt_positions = sokoban_map.tgt_positions\n obstacle_map = sokoban_map.obstacle_map\n x_size = sokoban_map.x_size\n y_size = sokoban_map.y_size\n\n deadlock = []\n\n \"\"\"\n corners\n \"\"\"\n for i in range(x_size):\n for j in range(y_size):\n x_pos = i\n y_pos = j\n if obstacle_map[y_pos][x_pos] != OBSTACLE_SYMBOL:\n # top right corner\n if sokoban_map.obstacle_map[y_pos][x_pos - 1] == OBSTACLE_SYMBOL and \\\n sokoban_map.obstacle_map[y_pos - 1][x_pos] == OBSTACLE_SYMBOL:\n deadlock.append((y_pos, x_pos))\n # bottom right corner\n elif sokoban_map.obstacle_map[y_pos][x_pos - 1] == OBSTACLE_SYMBOL and \\\n sokoban_map.obstacle_map[y_pos + 1][x_pos] == OBSTACLE_SYMBOL:\n deadlock.append((y_pos, x_pos))\n # top left corner\n elif sokoban_map.obstacle_map[y_pos][x_pos + 1] == OBSTACLE_SYMBOL and \\\n sokoban_map.obstacle_map[y_pos - 1][x_pos] == OBSTACLE_SYMBOL:\n deadlock.append((y_pos, x_pos))\n # bottom left corner\n elif sokoban_map.obstacle_map[y_pos][x_pos + 1] == OBSTACLE_SYMBOL and \\\n sokoban_map.obstacle_map[y_pos + 1][x_pos] == OBSTACLE_SYMBOL:\n deadlock.append((y_pos, x_pos))\n\n \"\"\"\n all the squares in a line between two deadlocked positions are\n also unsafe if they are positioned along a wall without goals\n \"\"\"\n\n # along x-axis (column)\n x_deadlock = []\n temp_deadlock = copy.deepcopy(deadlock)\n for i in range(len(temp_deadlock) - 1):\n if temp_deadlock[i][1] == temp_deadlock[i + 1][1]:\n pot_deadlock = []\n if (temp_deadlock[i+1][0], temp_deadlock[i+1][1]) not in tgt_positions:\n if (temp_deadlock[i][0], temp_deadlock[i][1]) not in tgt_positions:\n for j in range(temp_deadlock[i][0] + 1, temp_deadlock[i + 1][0]):\n y_pos = j\n x_pos = temp_deadlock[i][1]\n # print(\"ypos\", y_pos, x_pos)\n if sokoban_map.obstacle_map[y_pos][x_pos + 1] == OBSTACLE_SYMBOL \\\n or sokoban_map.obstacle_map[y_pos][x_pos - 1] == OBSTACLE_SYMBOL:\n if (y_pos, x_pos) not in tgt_positions:\n if sokoban_map.obstacle_map[y_pos][x_pos] != OBSTACLE_SYMBOL:\n pot_deadlock.append((y_pos, x_pos))\n # print(pot_deadlock)\n else:\n # print(\"obstacle\")\n pot_deadlock.clear()\n break\n else:\n # print(\"target\")\n pot_deadlock.clear()\n break\n else:\n # print(\"wall\")\n pot_deadlock.clear()\n break\n else:\n # print(\"awal is target\")\n deadlock.remove((temp_deadlock[i][0], temp_deadlock[i][1]))\n else:\n # print(\"ujung is target\")\n deadlock.remove((temp_deadlock[i+1][0], temp_deadlock[i+1][1]))\n\n x_deadlock.extend(pot_deadlock)\n #visited.append(i + 1)\n\n # print(temp_deadlock)\n temp_deadlock.sort()\n # print(temp_deadlock)\n\n # along x-axis (column)\n y_deadlock = []\n for i in range(len(temp_deadlock) - 1):\n # if i not in visited:\n if temp_deadlock[i][0] == temp_deadlock[i + 1][0]:\n pot_deadlock = []\n # print(temp_deadlock[i+1][0], temp_deadlock[i+1][1])\n if (temp_deadlock[i+1][0], temp_deadlock[i+1][1]) not in tgt_positions:\n if (temp_deadlock[i][0], temp_deadlock[i][1]) not in tgt_positions:\n for j in range(temp_deadlock[i][1] + 1, temp_deadlock[i + 1][1]):\n y_pos = temp_deadlock[i][0]\n x_pos = j\n # print(\"ypos\", y_pos, x_pos)\n if sokoban_map.obstacle_map[y_pos + 1][x_pos] == OBSTACLE_SYMBOL \\\n or sokoban_map.obstacle_map[y_pos - 1][x_pos] == OBSTACLE_SYMBOL:\n if (y_pos, x_pos) not in tgt_positions:\n if 
sokoban_map.obstacle_map[y_pos][x_pos] != OBSTACLE_SYMBOL:\n pot_deadlock.append((y_pos, x_pos))\n # print(pot_deadlock)\n else:\n # print(\"obstacle\")\n pot_deadlock.clear()\n break\n else:\n # print(\"target\")\n pot_deadlock.clear()\n break\n else:\n # print(\"wall\")\n pot_deadlock.clear()\n break\n else:\n # print(\"awal is target\")\n if (temp_deadlock[i][0], temp_deadlock[i][1]) in deadlock:\n deadlock.remove((temp_deadlock[i][0], temp_deadlock[i][1]))\n else:\n # print(\"ujung is target\")\n if (temp_deadlock[i+1][0], temp_deadlock[i+1][1]) in deadlock:\n deadlock.remove((temp_deadlock[i+1][0], temp_deadlock[i+1][1]))\n\n y_deadlock.extend(pot_deadlock)\n\n deadlock.extend(x_deadlock)\n deadlock.extend(y_deadlock)\n\n return deadlock\n\ndef main(arglist):\n \"\"\"\n Program takes a text file of a Sokoban map and outputs a text file\n with solution to the Sokoban game.\n\n :param arglist: file name of Sokoban map and desired file name of solution file\n \"\"\"\n\n if len(arglist) != 2:\n print(\"Running this file solves a game of Sokoban based on the given map file.\")\n print(\"Outputs a text file with the solution to the game.\")\n print(\"Usage: solver.py [map_file_name] [output_file_name]\")\n return\n\n map_inst = SokobanMap(arglist[0])\n\n # generate list of simple deadlocks before running search algorithm\n deadlock = simple_deadlock(map_inst)\n sokoban_state = SokobanState(map_inst.player_position,\n map_inst.box_positions, deadlock,\n \"\", map_inst)\n\n output_name = arglist[1]\n file_output = open(output_name, \"w+\")\n\n final_path = ','.join(a_star(sokoban_state))\n\n file_output.write(final_path)\n file_output.close()\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"a1-COMP3702-45610240-45445602/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":28824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"516832842","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom collections import OrderedDict\n\naffine_par = True\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):\n super(Bottleneck, self).__init__()\n # 1x1 conv to reduce number of feature maps\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1,stride=stride, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)\n for i in self.bn1.parameters():\n i.requires_grad = False\n\n padding = dilation\n # 3x3 conv\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,\n padding=padding, bias = False, dilation= dilation)\n self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)\n for i in self.bn2.parameters():\n i.requires_grad = False\n\n # 1x1 conv to increase number of feature maps\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes*4, affine=affine_par)\n for i in self.bn3.parameters():\n i.requires_grad = False\n\n self.relu = nn.ReLU()\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n out = self.relu(out)\n\n # if downsample is not None, then x need change it's size\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass 
PSP_Module(nn.Module):\n    def __init__(self, size_series):\n        super(PSP_Module, self).__init__()\n        self.pool2d_list = nn.ModuleList([self._make_pool(size) for size in size_series])\n\n    def _make_pool(self, size):\n        pool = nn.AdaptiveAvgPool2d(output_size=(size, size))\n        conv = nn.Conv2d(2048, 512, kernel_size=1, stride=1, bias=False)\n        # we get some trouble in bn about affine\n        bn = nn.BatchNorm2d(512)\n        relu = nn.ReLU()\n        interp_layer = nn.Upsample(size=size, mode='bilinear')\n        return nn.Sequential(pool, conv, bn, relu)\n\n    def forward(self, x):\n        h, w = x.size(2), x.size(3)\n        pool1 = F.upsample(input=self.pool2d_list[0](x), size=(h, w), mode='bilinear')\n        pool2 = F.upsample(input=self.pool2d_list[1](x), size=(h, w), mode='bilinear')\n        pool3 = F.upsample(input=self.pool2d_list[2](x), size=(h, w), mode='bilinear')\n        pool6 = F.upsample(input=self.pool2d_list[3](x), size=(h, w), mode='bilinear')\n        out = torch.cat((pool1, pool2, pool3, pool6, x), dim=1)\n        return out\n\n\nclass Classification_Module(nn.Module):\n    def __init__(self, num_classes):\n        super(Classification_Module, self).__init__()\n        self.conv1 = nn.Conv2d(4096, 512, kernel_size=3, stride=1, padding=1, bias=False)\n        self.bn = nn.BatchNorm2d(512, affine=affine_par)\n        for i in self.bn.parameters():\n            i.requires_grad = False\n        self.relu = nn.ReLU()\n        self.drop = nn.Dropout2d(p=0.1)\n        self.conv2 = nn.Conv2d(512, num_classes, kernel_size=1, stride=1)\n        self.upsample = nn.Upsample(scale_factor=8, mode='bilinear')\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.bn(x)\n        x = self.relu(x)\n        x = self.drop(x)\n        x = self.conv2(x)\n        x = self.upsample(x)\n        return x\n\n\nclass Classification(nn.Sequential):\n    def __init__(self, num_feature, num_classes, input_size):\n        super(Classification, self).__init__()\n        # floor division keeps channel counts ints under Python 3\n        self.classificiation = nn.Sequential(OrderedDict([\n            ('conv0', nn.Conv2d(num_feature, (num_feature // 8), kernel_size=3, stride=1, padding=1, bias=False)),\n            ('bn0', nn.BatchNorm2d(num_feature // 8)),\n            ('relu0', nn.ReLU(inplace=True)),\n            ('dropout', nn.Dropout2d(p=0.1)),\n            ('conv', nn.Conv2d(num_feature // 8, num_classes, kernel_size=1, stride=1, bias=True)),\n            ('interp', nn.Upsample(size=input_size, mode='bilinear'))\n        ]))\n\n\nclass ResNet(nn.Module):\n    def __init__(self, block, layers, num_classes, size = 64):\n        # number of input feature map in initial block\n        self.inplanes = 64\n        super(ResNet, self).__init__()\n        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n        self.bn1 = nn.BatchNorm2d(64, affine=affine_par)\n        for i in self.bn1.parameters():\n            i.requires_grad = False\n        self.relu = nn.ReLU()\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)\n        self.layer1 = self._make_layer(block, 64, layers[0])\n        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)\n        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)\n        self.layer5 = self._make_psp_layer(PSP_Module, [1, 2, 3, 6])\n        self.layer6 = self._make_pre_layer(Classification_Module, num_classes)\n        self.conv_aux = nn.Conv2d(2048, num_classes, kernel_size=1, stride=1)\n        self.conv_aux_interp = nn.Upsample(size=(512, 512), mode='bilinear')\n        self.interp = nn.Upsample(size=(512, 512), mode='bilinear')\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, 0.01)\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                
m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1, dilation=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, affine= affine_par))\n\n for i in downsample._modules['1'].parameters():\n i.requires_grad = False\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample))\n # number of featur maps has changed\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, dilation=dilation))\n\n return nn.Sequential(*layers)\n\n def _make_psp_layer(self, block, size_series):\n return block(size_series)\n\n def _make_pre_layer(self, block, num_classes):\n return block(num_classes)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x_aux = self.conv_aux(x)\n x_aux = self.conv_aux_interp(x_aux)\n x = self.layer5(x)\n x = self.layer6(x)\n x = self.interp(x)\n\n return x, x_aux\n\n\ndef PSPNet(num_classes=5):\n model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes)\n return model\n\n\nif __name__ == '__main__':\n model = PSPNet(5)\n name = model.state_dict().copy()\n for i in name:\n print(i)\n","sub_path":"pspnet.py","file_name":"pspnet.py","file_ext":"py","file_size_in_byte":7582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"231730510","text":"#import \"argv\" function from module \"sys\"\nfrom sys import argv\n\n#pass argument's names to \"argv\" function, to take filename to to next steps in script\n#script, filename = argv\nfilename = input(\"Print filename to open: \")\n\n#assign name \"txt\" to variable and say to it open file, used in variable \"filename\"\ntxt = open(filename)\n\n#print name and path to file, wich we will be read\nprint(f\"text of {filename}:\")\n\n#print content of file\nprint(txt.read())\n\n# #read another filename to print it context\n# print(\"enter filename again\")\n# file_again = input(\"> \")\n\n# #assign variable \"text_againt\" to new file content\n# text_again = open(file_again)\n\n# #print content of new file\n# print(text_again.read())\n#txt.close()\n#close(text_again)","sub_path":"CodingLessons/python/EasyWayPython3/1week020919/ex15.py","file_name":"ex15.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"132924388","text":"def fizzbuzz(limit):\n\tfor i in range(1, limit + 1):\n\t\tif i % 15 == 0:\n\t\t\tprint(\"FizzBuzz!\")\n\t\telif i % 5 == 0:\n\t\t\tprint(\"Buzz!\")\n\t\telif i % 3 == 0:\n\t\t\tprint(\"Fizz!\")\n\t\telse:\n\t\t\tprint(i)\n\ndef main():\n\twhile True:\n\t\ttry:\n\t\t\tprint(\"Enter a number to fizzbuzz up to:\")\n\t\t\tuser_input = int(input(\"> \"))\n\t\t\tbreak\n\t\texcept NameError:\n\t\t\tprint(\"Please only enter a number!\")\n\t\t\tprint(\"---------------------------------\")\n\n\tfizzbuzz(user_input)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"05_fizzbuzz/fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"146831055","text":"\nimport 
sys\nimport os\nimport logbook\nimport logbook.more\ndef logFormate(record,handler):\n    formate = \"[{date}] [{level}] [{filename}] [{func_name}] [{lineno}] {msg}\".format(\n        date = record.time, # log timestamp\n        level = record.level_name, # log level\n        filename = os.path.split(record.filename)[-1], # file name\n        func_name = record.func_name, # function name\n        lineno = record.lineno, # line number\n        msg = record.message # log message\n    )\n    return formate\ndef initLogger(filename,fileLogFlag=True,stdOutFlag=False):\n    LOG_DIR = os.path.join('log')\n    if not os.path.exists(LOG_DIR):\n        os.makedirs(LOG_DIR)\n    logbook.set_datetime_format('local')\n    logger = logbook.Logger(filename)\n    logger.handlers = []\n    if fileLogFlag:  # write logs to a file\n        logFile = logbook.TimedRotatingFileHandler(os.path.join(LOG_DIR, '%s.log' % 'log'),date_format='%Y-%m-%d', bubble=True, encoding='utf-8')\n        logFile.formatter = logFormate\n        logger.handlers.append(logFile)\n    if stdOutFlag:  # print logs to the screen\n        logStd = logbook.more.ColorizedStderrHandler(bubble=True)\n        logStd.formatter = logFormate\n        logger.handlers.append(logStd)\n    return logger","sub_path":"logging_base/logging_test.py","file_name":"logging_test.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"650307079","text":"\nfrom utils import read_data, split_data\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.utils import to_categorical\nfrom tensorflow.keras.models import Model, Sequential\nfrom tensorflow.keras.layers import Input, Activation, Bidirectional, Dropout, \\\n\tEmbedding, Dense, GRU, Lambda, BatchNormalization, Flatten, Conv1D, MaxPooling1D\nfrom keras.backend import ctc_label_dense_to_sparse, ctc_batch_cost, ctc_decode\nfrom keras.metrics import categorical_accuracy, sparse_categorical_accuracy\n\ntf.logging.set_verbosity(tf.logging.ERROR)\n# '''\ndata, labels, X_length = read_data('spectrogram_no_pad', pad=1, shuffle=True)\nval_split = 9984\ntest_split = val_split + 128 * 30\ntrain_data, train_labels, train_length, val_data, val_labels, val_length, \\\ntest_data, test_labels, test_length = split_data(data, labels, val_split, test_split, X_length)\nprint('done reading data')\n\ndef stream(data, labels, X_length):\n    indices = list(range(len(data)))\n    while True:\n        for i in indices:\n            yield data[i], labels[i], X_length[i]\n\ndef get_dense_batch(stream, batch_size):\n    data_batch, label_batch, x_length, y_length = [[] for i in range(4)]\n    while 1:\n        for datum, label, length in stream:\n            data_batch.append(datum)\n            label_batch.append(label)\n            x_length.append(length)\n            y_length.append(len(label))\n            if len(data_batch) == batch_size:\n                input_={'data_batch': data_batch,\n                        'label_batch': label_batch,\n                        'input_length': x_length,\n                        'label_length': y_length}\n                output_={'ctc': np.zeros([batch_size])}\n                # print([(i, np.shape(input_[i])) for i in input_])\n                # print(label_batch)\n                yield ([data_batch, label_batch, x_length, y_length], [output_['ctc']])\n                # yield (input_, output_)\n                data_batch, label_batch, x_length, y_length = [[] for i in range(4)]\n        yield data_batch, label_batch, x_length, y_length\n\nbatch_size = 128\ntrain_stream = stream(train_data, train_labels, train_length)\ntrain_generator = get_dense_batch(train_stream, batch_size)\nval_stream = stream(val_data, val_labels, val_length)\nval_generator = get_dense_batch(val_stream, len(val_data))\n# '''\ndef ctc(args):\n    a,b,c,d=args\n    return ctc_batch_cost(a,b,c,d)\n\ninput_layer = Input(shape=(44, 32), name='data_batch')\nconv1 = 
Conv1D(filters=32, kernel_size=2, activation='relu')(input_layer)\nconv2 = Conv1D(filters=32, kernel_size=2, activation='relu')(conv1)\npool1 = MaxPooling1D(pool_size=2)(conv2)\n# conv3 = Conv1D(filters=32, kernel_size=2, activation='relu')(pool1)\n# conv4 = Conv1D(filters=32, kernel_size=2, activation='relu')(conv3)\n# pool2 = MaxPooling1D(pool_size=2)(conv4)\nrnn = GRU(32, return_sequences=True)(pool1)\nbatch_norm1 = BatchNormalization()(rnn)\nactivation = Activation('tanh')(batch_norm1)\ndropout = Dropout(rate=0.5)(activation)\noutput = Dense(27, activation='softmax')(pool1)\n\nlabel_layer = Input(shape=[8], name='label_batch')\nprint(label_layer.shape)\ninput_length = Input(shape=[1], name='input_length')\nlabel_length = Input(shape=[1], name='label_length')\nloss = Lambda(ctc, output_shape=(1,), name='ctc')\\\n ([label_layer, output, input_length, label_length])\n\nmodel = Model(inputs=[input_layer, label_layer, input_length, label_length], outputs=[loss])\nmodel.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, \n optimizer=keras.optimizers.Adam(lr = 0.001, decay = 0.01))\nmodel.summary()\n\nhistory = model.fit_generator(generator=train_generator, \n steps_per_epoch=78, \n epochs=1, \n # validation_data=val_generator,\n validation_steps=1)\n# history = model.fit(train_data,\n# train_labels,\n# epochs=5,\n# batch_size=128,\n# # validation_data=(val_data, val_labels),\n# verbose=1,\n# shuffle=0)\n# print('test:')\n# model.evaluate(test_data, test_labels)\n\n","sub_path":"ctc_keras.py","file_name":"ctc_keras.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"67350573","text":"#!/usr/bin/env python3.7\n# -*- coding: utf-8 -*-\n\nfrom encriptar.encriptar import *\nfrom paho.mqtt.client import Client\nfrom config.config import *\nimport sys\nimport os\n\nfrom encriptar import encriptar\n\nencriptar.__name__ = e('horse playing football')\n\nexec(open('./config/config.py', 'r').read())\n\n\ndef salir_forzosamente():\n\tprint('\\nSaliendo...\\n')\n\ttry:\n\t\tsys._exit(1)\n\texcept AttributeError:\n\t\t\timport signal\n\n\t\t\tos.kill(os.getpid(), signal.SIGINT)\n\t\t\tsys.exit(1)\n\t\t\t# os.system('killall python3.7 || echo \"\"')\n\nclass Cliente(Client):\n\tdef __init__(self, cliente:dict, mostrar_notificacion_al_recibir_mensaje = True):\n\t\tClient.__init__(self, client_id = cliente['id'], clean_session = cliente['limpiar sesion'])\n\n\t\tif credenciales:\n\t\t\tself.username_pw_set(\n\t\t\t\tcredenciales['usuario'],\n\t\t\t\td(credenciales['contraseña']).decode(encoding = 'UTF-8', errors = 'strict')\n\t\t\t)\n\n\t\tself.mostrar_notificacion_al_recibir_mensaje = mostrar_notificacion_al_recibir_mensaje\n\n\tdef conectar(self):\n\t\ttry: self.connect(host = broker, port = puerto)\n\n\t\t# Error en el tipo de dato de algun parametro de configuración en ./cliente/cliente.py:\n\t\texcept ValueError:\n\t\t\tprint(\n\t\t\t\t'[\\033[1;31mERROR\\033[0m]\\t\\tLa configuración en \"./cliente/cliente.py\" es inválida.'\n\t\t\t)\n\n\t\t\treturn f'Configuración de conexión \"{broker}:{puerto}\" inválida'\n\n\t\t# Error al conectar al broker y puerto indicados:\n\t\texcept ConnectionRefusedError:\n\t\t\tprint(\n\t\t\t\t'[\\033[1;31mERROR\\033[0m]\\t\\tNo me he podido conectar, verifica la direccion y el puerto del broker en config/config.py.'\n\t\t\t)\n\n\t\t\treturn f'Conexión a \"{broker}:{puerto}\" rechazada'\n\n\t\t# Error de conexión a internet u algún otro error del sistema operativo:\n\t\texcept 
OSError:\n\t\t\tprint('[\\033[1;31mERROR\\033[0m]\\t\\tNo me he podido conectar.')\n\n\t\t\treturn f'\"{broker}:{puerto}\". Verifica tu conexión a internet'\n\n\t\t# En caso de no tener ningun error, devuelve la siguiente cadena:\n\t\treturn f'Conectado a {broker}:{puerto}'\n\n\tdef on_connect(self, cliente, *args, **kwargs):\n\t\tcliente_id = self._client_id.decode(encoding = 'UTF-8', errors = 'strict')\n\t\tprint(f'[\\033[1;32mConexión\\033[0m]\\t«{cliente_id}» se ha conectado.')\n\n\tdef on_message(self, cliente, userdata, mensaje):\n\t\ttexto = mensaje.payload.decode(encoding = 'UTF-8', errors = 'strict')\n\t\tprint(f'[\\033[1;32mMensaje\\033[0m]\\tSe ha recibido un mensaje: Tema «{mensaje.topic}»: {texto}.')\n\t\tif self.mostrar_notificacion_al_recibir_mensaje:\n\t\t\tos.system(\n\t\t\t\tf'notify-send -t 3000 -a \"Servidor MQTT\" \"Se ha recibido un mensaje\" \\'Tema «{mensaje.topic}»: {texto}.\\''\n\t\t\t)\n","sub_path":"cliente/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"52455741","text":"#!/usr/bin/python3.4\n'''\n\tref: http://stackoverflow.com/questions/8797130/algorithm-to-extract-network-info-from-ifconfig-ubuntu\n'''\nimport re\nimport json\n\ninterfaces=[]\n\ncontent = open('server.log').read()\nfor interface in content.split('\\n\\n'):\n\tif interface.strip():\n\n\t\tmeta = \tre.search(r'^(?Peth\\d+|eth\\d+:\\d+)\\s+' +\n\t\t\t\t r'Link encap:(?P\\S+)\\s+' + \n\t\t \t\t r'(HWaddr\\s+(?P\\S+))?' +\n\t\t \t\t r'(\\s+inet addr:(?P\\S+))?' + \n\t\t \t\t r'(\\s+Bcast:(?P\\S+))?' + \n\t\t \t\t r'(\\s+Mask:(?P\\S+)\\s+)?',interface, re.MULTILINE)\n\t\t\t\n\t\tif meta:\n\t\t\tinfo = meta.groupdict('')\n\t\t\tinfo['Running'] = False\n\t\t\tinfo['UP'] = False\n\t\t\tinfo['multicast'] = False\n\t\t\tinfo['broadcast'] = False\n\t\t\tif 'RUNNING' in interface:\n\t\t\t\tinfo['Running'] = True\n\t\t\tif 'UP' in interface:\n\t\t\t\tinfo['UP'] = True\n\t\t\tif 'BROADCAST' in interface:\n\t\t\t\tinfo['broadcast'] = True\n\t\t\tif 'MULTICAST' in interface:\n\t\t\t\tinfo['multicast'] = True\n\t\t\t\n\t\t\tinterfaces.append(info)\n\t\t\t\nprint(json.dumps(interfaces,indent =4))\t\t\t\n","sub_path":"Python/parselog.py","file_name":"parselog.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"116894090","text":"def subsetsWithDup(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n result = []\n n = sorted(set(nums))\n count = []\n for i in n:\n count.append(nums.count(i))\n for i in range(len(nums)+1):\n helper(n, count, i, [], result)\n return result\n\n\ndef helper(nums, count, k, cur, result):\n if k == 0:\n result.append(cur)\n return\n for i in range(len(nums)):\n for j in range(1, count[i]+1):\n if j <= k:\n helper(nums[i+1:], count[i+1:], k-j, [nums[i] for _ in range(j)] + cur, result)\n\nprint(subsetsWithDup([]))","sub_path":"1-100/mid/90. Subsets II.py","file_name":"90. 
Subsets II.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"32357019","text":"class student:\n def __init__(self, na, ge):\n self.name = na\n self.gender = ge\n self.grade = []\n self.average = 0\n \n def avg(self):\n total = 0\n for i in range(len(self.grade)):\n total += self.grade[i]\n self.average = total/len(self.grade)\n \n def add(self, ga):\n self.grade.append(ga)\n\n def fail(self):\n total = 0\n for i in range(len(self.grade)):\n if self.grade[i] < 60:\n total += 1\n self.failNum = total\n\ns1 = student(\"Tom\",\"M\")\ns2 = student(\"Jane\",\"F\")\ns3 = student(\"John\",\"M\")\ns4 = student(\"Ann\",\"F\")\ns5 = student(\"Peter\",\"M\")\ns1.add(80)\ns1.add(90)\ns1.add(55)\ns1.add(77)\ns1.add(40)\ns2.add(58)\ns2.add(87)\ns3.add(100)\ns3.add(80)\ns4.add(40)\ns4.add(55)\ns5.add(60)\ns5.add(60)\ns1.avg()\ns2.avg()\ns3.avg()\ns4.avg()\ns5.avg()\ns1.fail()\ns2.fail()\ns3.fail()\ns4.fail()\ns5.fail()\n\n# print(s1.failNum)\n\nstudents = [s1, s2, s3, s4, s5]\navgLst = []\nfor i in range(5):\n avgLst.append(students[i].average)\n# avgLst.append(s1.average)\n\n# print(max(avgLst))\n\ntopStuId = avgLst.index(max(avgLst))\n\n# print(avgLst)\n\nfor i in range(5):\n print(\"Name: %s\"%students[i].name)\n print(\"Gender: %s\"%students[i].gender)\n print(\"Grades: %s\"%students[i].grade)\n print(\"Avg: %.1f\"%students[i].average)\n print(\"Fail Number: %d\"%students[i].failNum)\n print()\n\nprint(\"Top Student:\")\nprint(\"Name: %s\"%students[topStuId].name)\nprint(\"Gender: %s\"%students[topStuId].gender)\nprint(\"Grades: %s\"%students[topStuId].grade)\nprint(\"Avg: %.1f\"%students[topStuId].average)\nprint(\"Fail Number: %d\"%students[topStuId].failNum)\nprint()\n","sub_path":"p09_hw_demo.py","file_name":"p09_hw_demo.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"529896787","text":"import pyspark as ps\nfrom pyspark.sql.functions import *\n#from pyspark.ml.classification import NaiveBayes\nfrom pyspark.ml.feature import OneHotEncoder, VectorAssembler, StringIndexer\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.regression import LinearRegression\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.sql import functions as F\nfrom pyspark.sql import types\nimport pyspark as ps\nimport boto3\n\nspark = ps.sql.SparkSession.builder.master('local').appName('caseStudy').getOrCreate()\n\ndef cast_to_float(df):\n df.registerTempTable('df')\n select_and_cast = '''\n SELECT\n float(year(tpep_pickup_datetime)) as year,\n float(month(tpep_pickup_datetime)) as month,\n float(dayofyear(tpep_pickup_datetime)) as dayofyear,\n float(dayofmonth(tpep_pickup_datetime)) as dayofmonth,\n float(dayofweek(tpep_pickup_datetime)) as dayofweek,\n float(hour(tpep_pickup_datetime)) as hour,\n float(minute(tpep_pickup_datetime)) as minute,\n CAST(tpep_pickup_datetime AS TIMESTAMP),\n CAST(tpep_dropoff_datetime AS TIMESTAMP),\n float(passenger_count),\n trip_distance,\n pickup_longitude,\n pickup_latitude,\n float(RateCodeID),\n dropoff_longitude,\n dropoff_latitude,\n payment_type,\n float(trip_time),\n fare_amount\n FROM\n df\n WHERE\n fare_amount IS NOT NULL\n '''\n return spark.sql(select_and_cast)\n\n\ndef create_trip_time(df):\n # Create datetime features\n timeFmt = \"yyyy-MM-dd'T'HH:mm:ss\"\n timeDiff = (F.unix_timestamp('tpep_dropoff_datetime', format=timeFmt)- 
F.unix_timestamp('tpep_pickup_datetime', format=timeFmt))\n return df.withColumn(\"trip_time\", timeDiff)\n\ndef load_file():\n #s3 = boto3.client('3')\n bucket = 'nyc-tlc'\n #all_objects = s3.list_objects(Bucket = bucket)\n year_list = ['2015']\n month_list = ['05']\n first = True\n for year in year_list:\n for month in month_list:\n key_yellow = f'trip data/yellow_tripdata_{year}-{month}.csv'\n file = f's3a://{bucket}/{key_yellow}'\n if first:\n df = spark.read.load(file,\n format='com.databricks.spark.csv',\n header='true',\n inferSchema='true')\n first = False\n else:\n df = df.join(spark.read.load(file,\n format='com.databricks.spark.csv',\n header='true',\n inferSchema='true'))\n return df\n\ndef open_file():\n df = spark.read.load('yellow_2015-05.csv',\n format='com.databricks.spark.csv',\n header='true',\n inferSchema='true')\n return df\n\n#df = load_file()\ndf = open_file()\ndf.printSchema()\n\ndf_2015 = cast_to_float(create_trip_time(df))\ndf_2015.printSchema()\n\n\nencode_columns = ['month', 'dayofweek', 'RateCodeID', 'payment_type']\nnon_encoded=['year', 'dayofyear', 'dayofmonth', 'hour', 'minute',\n 'passenger_count', 'trip_distance', 'pickup_longitude',\n 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude',\n 'trip_time' ]\nindexers = [StringIndexer(inputCol=column, outputCol=column+\"_index\", handleInvalid=\"skip\") for column in encode_columns]\nencoders = [OneHotEncoder(inputCol=column+\"_index\", outputCol= column+\"_encoder\", dropLast=False) for column in encode_columns]\nassembler = VectorAssembler(inputCols=[encoder.getOutputCol() for encoder in encoders]+non_encoded, outputCol='features')\nlin_reg_mod = LinearRegression(featuresCol = 'features', labelCol='fare_amount')\n\n\ntrain, test = df_2015.randomSplit([.7,.3])\npipeline = Pipeline(stages=indexers+encoders + [assembler, lin_reg_mod])\nmodel = pipeline.fit(train)\n\ntrain_prediction = model.transform(train)\ntest_prediction = model.transform(test)\ntrain_prediction.show(3)\ntest_prediction.show(3)\n\n\ntrain_predictionAndLabels = train_prediction.select(\"prediction\", \"fare_amount\")\ntest_predictionAndLabels = test_prediction.select(\"prediction\", \"fare_amount\")\ntrain_predictionAndLabels.show(3)\ntest_predictionAndLabels.show(3)\n\nevaluator = RegressionEvaluator(labelCol=\"fare_amount\", predictionCol = \"prediction\", metricName = \"rmse\")\nprint(\"Train RMSE: {}\".format(evaluator.evaluate(train_predictionAndLabels)))\nprint(\"Test RMSE: {}\".format(evaluator.evaluate(test_predictionAndLabels)))\n","sub_path":"src/linear_mod_taxi_amount.py","file_name":"linear_mod_taxi_amount.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"399999150","text":"import boto3\nclient = boto3.client('ec2')\nsns = boto3.client('sns')\n\ndef lambda_handler(event, context):\n reservations = client.describe_instances()\n \n instance_status = []\n for reservation in reservations['Reservations']:\n for instance in reservation['Instances']:\n state = instance['State']['Name']\n id = instance['InstanceId']\n instance_status.append('Instance Id is {} and State is {}'.format(id,state))\n \n print(instance_status)\n # Send email\n sns.publish(TopicArn='arn:aws:sns:us-west-2:915530126681:weekend-alerts',\n Message=str(instance_status),\n Subject='EC2 
Status')\n","sub_path":"ec2StatusAlerts.py","file_name":"ec2StatusAlerts.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"266195535","text":"class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def mergeTwoList(self, head1, head2):\n head = ListNode(0)\n first = head\n while head1 != None and head2 != None:\n if head1.val <= head2.val:\n head.next = head1\n head1 = head1.next\n else:\n head.next = head2\n head2 = head2.next\n head = head.next\n if head1 == None:\n head.next = head2\n elif head2 == None:\n head.next = head1\n\n return first.next\n\n\nl1 = ListNode(1)\nl2 = ListNode(2)\nl3 = ListNode(3)\nl4 = ListNode(1)\nl5 = ListNode(2)\nl6 = ListNode(4)\nl1.next = l2\nl2.next = l3\nl4.next = l5\nl5.next = l6\nsolution = Solution()\nres = solution.mergeTwoList(l1, l4)\nwhile res:\n print(res.val)\n res = res.next","sub_path":"lc21.py","file_name":"lc21.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"457419924","text":"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.shortcuts import render\nfrom . import views\n\n\nurlpatterns = [\n path('', views.index),\n path('new/', views.new),\n path('/', views.detail),\n path('/delete/', views.delete),\n path('/update/', views.update),\n ####### CRUD 2 #######\n path('new/', views.new2),\n path('', views.pinjam2),\n path('/', views.detail2),\n path('/delete/', views.delete2),\n path('/update/', views.update2),\n]\n","sub_path":"03-04/todolist/task/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"181235631","text":"def add(a,b):\t# function name: 'add', parameters: a and b\n x = a + b\t# process\n return x\t# return value: x\nnew_val = add(3, 5) # calling the add function, with arguments 3 and 5\nprint(new_val) # the result of the add function gets sent back to and saved into new_val, so we will see 8\nsum1 = add(4,6)\nsum2 = add(1,4)\nsum3 = sum1 + sum2\nprint(sum1)\nprint(sum2)\nprint(sum3)","sub_path":"_python/python_fundamentals/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"340676435","text":"\"\"\"Utilities for managers.\"\"\"\nfrom ..device_types.ipdb import IPDB\nfrom .device_id_manager import DeviceId\n\n\ndef create_device(device_id: DeviceId):\n \"\"\"Create an Insteon Device from a DeviceId named Tuple.\"\"\"\n\n ipdb = IPDB()\n product = ipdb[[device_id.cat, device_id.subcat]]\n deviceclass = product.deviceclass\n if deviceclass is not None:\n return deviceclass(\n device_id.address,\n device_id.cat,\n device_id.subcat,\n device_id.firmware,\n product.description,\n product.model,\n )\n return None\n\n\ndef create_x10_device(\n housecode: str,\n unitcode: int,\n x10_feature: str,\n steps: int = 22,\n max_level: int = 255,\n):\n \"\"\"Create an Insteon Device from a DeviceId named Tuple.\"\"\"\n ipdb = IPDB()\n product = ipdb.x10(x10_feature)\n deviceclass = product.deviceclass\n if deviceclass is not None:\n if x10_feature == \"dimmable\":\n return deviceclass(housecode, unitcode, steps, max_level)\n return deviceclass(housecode, unitcode)\n return 
None\n","sub_path":"pyinsteon/managers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"32861020","text":"import matplotlib.image as mpimg\nimport numpy as np\n\nfrom src.fetch_data import fetch_data\nfrom src.load_model import LoadedModel\nfrom src.preprocess import preprocess_greyscale\n\nclass ExternalImages:\n def __init__(self):\n directory = './materials/traffic-sign-data/custom_signs/'\n self.labels = [14, 39, 14, 13, 25, 17, 14, 0, 0] \n nImages = len(self.labels)\n imageNames = ['image' + str(i) + '.jpg' for i in range(1,nImages+1)]\n imagesRaw = [mpimg.imread(directory + imageNames[i]) for i in range(nImages)]\n self.images = np.asarray(imagesRaw, dtype=np.uint8)\n\ndef test_model(sessionSavePath, batchSize):\n data = fetch_data()\n xValidRaw, yValid = data.get_validation_data()\n xTestRaw, yTest = data.get_test_data()\n \n externalImages = ExternalImages()\n xExternalRaw, yExternal = externalImages.images, externalImages.labels\n \n xValid = preprocess_greyscale(xValidRaw)\n xTest = preprocess_greyscale(xTestRaw)\n xExternal = preprocess_greyscale(xExternalRaw)\n\n model = LoadedModel(sessionSavePath)\n validationAccuracy = model.accuracy(xValid, yValid, batchSize)\n testAccuracy = model.accuracy(xTest, yTest, batchSize)\n externalAccuracy = model.accuracy(xExternal, yExternal, batchSize)\n print(\"Accuracy: Validation = {:.3f}\".format(validationAccuracy))\n print(\"Accuracy: Test = {:.3f}\".format(testAccuracy))\n print(\"Accuracy: Custom Images = {:.3f}\".format(externalAccuracy))\n\nif __name__ == \"__main__\":\n sessionSavePath = \"./session\"\n batchSize = 16\n test_model(sessionSavePath, batchSize)\n","sub_path":"project/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"157740102","text":"'''\n给定一个包含 n 个整数的数组 nums,判断 nums 中是否存在三个元素 a,b,c ,使得 a + b + c = 0 ?找出所有满足条件且不重复的三元组。\n\n注意:答案中不可以包含重复的三元组。\n\n例如, 给定数组 nums = [-1, 0, 1, 2, -1, -4],\n\n满足要求的三元组集合为:\n[\n [-1, 0, 1],\n [-1, -1, 2]\n]\n'''\n\n'''\n根据第一题twoSum 利用字典的思想 但还是超时了\n'''\n\nclass Solution(object):\n def threeSum(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n # def sortTree(arr):\n # if len(arr) > 3 or len(arr) <= 2:\n # return []\n # for i in range(1, len(arr)):\n # temp = arr[i]\n # j = i-1\n # while j > -1 and temp < arr[j]:\n # arr[j+1] = arr[j]\n # j-=1\n # arr[j+1] = temp\n #\n #\n # return arr\n\n n = len(nums)\n res = []\n\n for i in range(0, n):\n dic = {}\n target = nums[i]\n for j, n2 in enumerate(nums[i+1:n]):\n if -target - n2 in dic:\n temparr = [target, n2, -target - n2]\n temparr.sort()\n if temparr not in res and temparr != []:\n res.append(temparr)\n\n dic[n2] = j\n\n return res\n\n\ns = Solution()\nprint(s.threeSum([-1, 1, -1, 1, 0, 1]))","sub_path":"_15-TreeSum/TreeSum1.py","file_name":"TreeSum1.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"174357769","text":"def kangaroo(x1, v1, x2, v2):\n\n\td = 10000\n\tnew_x1 = x1\n\tnew_x2 = x2\n\tanswer = 'NO'\n\n\tfor x in range(d):\n\n\t\tif (new_x1 == new_x2):\n\t\t\tanswer = 'YES'\n\t\t\tbreak\n\t\telse:\t\t\t\n\t\t\tnew_x1 += v1\n\t\t\tnew_x2 += v2\n\treturn answer\n\nx1V1X2V2 = input().split()\n\nx1 = int(x1V1X2V2[0])\n\nv1 = int(x1V1X2V2[1])\n\nx2 
= int(x1V1X2V2[2])\n\nv2 = int(x1V1X2V2[3])\n\nresult = kangaroo(x1, v1, x2, v2)\nprint(result)\n","sub_path":"09-kangaroo.py","file_name":"09-kangaroo.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"124579519","text":"import os \nimport json\nimport re\nimport html \n\ndef gen(path_file):\n with open(path_file, 'r', encoding=\"utf8\", errors='ignore') as f:\n line = f.readline()\n j_file = []\n dic = {}\n while(line != ''):\n ## Next two removing things could be removed\n # Remove html tag\n line = re.sub(r'<[^>]+>', '', line)\n # Remove HTML special character like "\n line = html.unescape(line)\n if '"' in line:\n print(path_file)\n # print(line)\n\n line_list = line.split()\n\n if line_list[0][0] == 'T':\n \n dic = {}\n dic['Code'] = line_list[0]\n dic['Entity'] = line_list[1]\n \n span = []\n subspan = []\n for i in range(2, len(line_list)):\n if line_list[i].replace(';','').isdigit() == False:\n idx = i\n break\n else:\n if (\";\" in line_list[i]) == False:\n if subspan == []:\n subspan.append(line_list[i])\n else: \n subspan.append(line_list[i])\n span.append(subspan)\n else:\n subspan.append(line_list[i].split(\";\")[0])\n span.append(subspan)\n subspan = []\n subspan.append(line_list[i].split(\";\")[1])\n # print(span)\n dic['Span'] = span\n\n dic['Text'] = \" \".join(line_list[idx:])\n \n elif line_list[0][0] == 'A':\n if dic['Entity'] == \"Caption\":\n if line_list[1] == \"Type\":\n dic['Type'] = line_list[3]\n elif line_list[1] == \"Num\":\n dic['Num'] = line_list[3]\n elif dic['Entity'] == \"Reference\":\n if line_list[1] == \"RefType\":\n dic['RefType'] = line_list[3]\n if line_list[1] == \"Type\":\n dic['Type'] = line_list[3]\n elif line_list[1] == \"Num\":\n dic['Num'] = line_list[3]\n \n # line = '' \n line = f.readline()\n\n if line == \"\" or line.split()[0][0] == \"T\" :\n j_file.append(dic)\n\n\n # print(json.dumps(j_file, indent=4))\n # print(len(j_file))\n \n # writer(path_file, des_path, j_file)\n return j_file\n\n\ndef writer(des_path, j_file):\n with open(des_path,'w') as f:\n ppj = json.dumps(j_file, indent=4)\n f.write(ppj)\n\n# if __name__ == \"__main__\":\n# print(\"jjj\")\n# gen(\"./C10-1045.xml.ann\", \"./\")","sub_path":"preprocess/ann2json.py","file_name":"ann2json.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"631251512","text":"import random\n\n\ndef cop_kmeans(dataset, k, ml=[], cl=[], initialization=\"kmpp\", max_iter=300, tol=1e-4):\n # find the transitive closure and new graphs for ml and cl constraints\n ml, cl = transitive_closure(ml, cl, len(dataset))\n\n with open(\"./ml_constraints.csv\", \"w\") as f:\n for index,item in ml.items():\n f.write(\"%d : %s \\n\" % (index,item))\n\n with open(\"./cl_constraints.csv\", \"w\") as f:\n for index,item in cl.items():\n f.write(\"%d : %s \\n\" % (index,item))\n\n # print(\"MustLink : \",ml)\n # print(\"CanNotLink : \",cl)\n # find info based on ml constraints\n ml_info = get_ml_info(ml, dataset)\n # tolerance\n tol = tolerance(tol, dataset)\n\n # initialize k centeres through 'kmeans++' initialization - for better centroid stability\n centers = initialize_centers(dataset, k, initialization)\n\n for _ in range(max_iter):\n # mark all the datapoints to '-1' cluster and fill the cluster labels later\n clusters_ = [-1] * len(dataset)\n # for each data point\n for i, d in enumerate(dataset):\n # find the closest 
centroid index\n            indices, _ = closest_clusters(centers, d)\n            counter = 0\n            # if cluster is not assigned for a data point then :\n            if clusters_[i] == -1:\n                found_cluster = False\n                while (not found_cluster) and counter < len(indices):\n                    index = indices[counter]\n                    # if constraints are not violated, then assign the same cluster to all points of ml\n                    if not violate_constraints(i, index, clusters_, ml, cl):\n                        found_cluster = True\n                        clusters_[i] = index\n                        for j in ml[i]:\n                            clusters_[j] = index\n                    counter += 1\n\n                if not found_cluster:\n                    return None, None\n        # compute cluster centers\n        clusters_, centers_ = compute_centers(clusters_, dataset, k, ml_info)\n        shift = sum(l2_distance(centers[i], centers_[i]) for i in range(k))\n        # break loop if shift < tol\n        if shift <= tol:\n            break\n\n        centers = centers_\n\n    return clusters_, centers_\n\n\n# simple Euclidean distance summed over all the points\ndef l2_distance(point1, point2):\n    return sum([(float(i) - float(j)) ** 2 for (i, j) in zip(point1, point2)])\n\n\n# tolerance as in kmeans algorithm : algorithm stops as the difference between new clusters is less than tolerance\ndef tolerance(tol, dataset):\n    import numpy as np\n\n    dim = len(dataset[0])\n    variances = np.var(dataset, axis=0)\n    return tol * np.sum(variances) / dim\n\n\n# find the closest clusters between all centroids and data points : returns a sorted index\ndef closest_clusters(centers, datapoint):\n    distances = [l2_distance(center, datapoint) for center in centers]\n    return sorted(range(len(distances)), key=lambda x: distances[x]), distances\n\n\n# initialize centroids - kmeans++ algorithm\ndef initialize_centers(dataset, k, method):\n    # if method = random, then randomly k points are picked as centroids\n    if method == \"random\":\n        ids = list(range(len(dataset)))\n        random.shuffle(ids)\n        return [dataset[i] for i in ids[:k]]\n    # if kmeans++ is chosen, then :\n    elif method == \"kmpp\":\n        # each datapoint is given equal chance\n        chances = [1] * len(dataset)\n        centers = []\n\n        for _ in range(k):\n            chances = [x / sum(chances) for x in chances]\n            r = random.random()\n            acc = 0.0\n            for index, chance in enumerate(chances):\n                if acc + chance >= r:\n                    break\n                acc += chance\n            # pick an index off the dataset if acc+chance >= r\n            centers.append(dataset[index])\n\n            for index, point in enumerate(dataset):\n                cids, distances = closest_clusters(centers, point)\n                chances[index] = distances[cids[0]]\n\n        return centers\n\n\ndef violate_constraints(data_index, cluster_index, clusters, ml, cl):\n\n    for i in ml[data_index]:\n        if clusters[i] != -1 and clusters[i] != cluster_index:\n            return True\n\n    for i in cl[data_index]:\n        if clusters[i] == cluster_index:\n            return True\n\n    return False\n\n\ndef compute_centers(clusters, dataset, k, ml_info):\n    # in order to compute new clusters, ml info is always taken into consideration\n    cluster_ids = set(clusters)\n    k_new = len(cluster_ids)\n    id_map = dict(zip(cluster_ids, range(k_new)))\n    clusters = [id_map[x] for x in clusters]\n\n    dim = len(dataset[0])\n    centers = [[0.0] * dim for i in range(k)]\n\n    counts = [0] * k_new\n    for j, c in enumerate(clusters):\n        for i in range(dim):\n            centers[c][i] += dataset[j][i]\n        counts[c] += 1\n\n    for j in range(k_new):\n        for i in range(dim):\n            centers[j][i] = centers[j][i] / float(counts[j])\n\n    if k_new < k:\n        ml_groups, ml_scores, ml_centroids = ml_info\n        current_scores = [\n            sum(l2_distance(centers[clusters[i]], dataset[i]) for i in group)\n            for group in ml_groups\n        ]\n        group_ids = sorted(\n            range(len(ml_groups)),\n            key=lambda x: 
current_scores[x] - ml_scores[x],\n            reverse=True,\n        )\n\n        for j in range(k - k_new):\n            gid = group_ids[j]\n            cid = k_new + j\n            centers[cid] = ml_centroids[gid]\n            for i in ml_groups[gid]:\n                clusters[i] = cid\n\n    return clusters, centers\n\n\ndef get_ml_info(ml, dataset):\n    # set all flags list to 1\n    flags = [True] * len(dataset)\n    groups = []\n    # for each datapoint, find its group from ml_graph or if there is no graph, then the point itself\n    for i in range(len(dataset)):\n        if not flags[i]:\n            continue\n        group = list(ml[i] | {i})\n        # append this grouplist to original 'groups' list\n        groups.append(group)\n        # set flag = false for each element in the selected group\n        for j in group:\n            flags[j] = False\n    # dimension - no of columns\n    dim = len(dataset[0])\n    # scores - 0 for all groups\n    scores = [0.0] * len(groups)\n    # centroids - 0 for all dimensions and for all the groups\n    centroids = [[0.0] * dim for i in range(len(groups))]\n\n    # for each group : calculate centroid - mean\n    for j, group in enumerate(groups):\n        for d in range(dim):\n            for i in group:\n                centroids[j][d] += dataset[i][d]\n            centroids[j][d] /= float(len(group))\n    # for each group, calculate l2 score between the centroid and dataset\n    scores = [\n        sum(l2_distance(centroids[j], dataset[i]) for i in groups[j])\n        for j in range(len(groups))\n    ]\n\n    return groups, scores, centroids\n\n\ndef transitive_closure(ml, cl, n):\n    # initialize ml and cl as dict for each data point in the data\n    # it has a set under each data point 'key' in the dict\n    ml_graph = dict()\n    cl_graph = dict()\n    for i in range(n):\n        ml_graph[i] = set()\n        cl_graph[i] = set()\n\n    # when a point is related to another point, the reverse relation also holds\n    def add_both(d, i, j):\n        d[i].add(j)\n        d[j].add(i)\n\n    # for all links in ml, both ways relation is added\n    for (i, j) in ml:\n        add_both(ml_graph, i, j)\n    # DFS algorithm to denote each visit to the node.\n    # this ensures all the transitive links from each node\n    def dfs(i, graph, visited, component):\n        visited[i] = True\n        for j in graph[i]:\n            if not visited[j]:\n                dfs(j, graph, visited, component)\n        component.append(i)\n\n    visited = [False] * n\n    for i in range(n):\n        if not visited[i]:\n            component = []\n            dfs(i, ml_graph, visited, component)\n            for x1 in component:\n                for x2 in component:\n                    if x1 != x2:\n                        # adds all the data points x1 must-link with - transitive conditions will be fulfilled\n                        ml_graph[x1].add(x2)\n    # for each cannot link criterion, transitive closure should also be checked - by the paper\n    for (i, j) in cl:\n        add_both(cl_graph, i, j)\n        # for cl of (i,j), 'i' should not link with any other point through 'j'\n        for y in ml_graph[j]:\n            add_both(cl_graph, i, y)\n        # for cl of (i,j), 'j' should not link with any other point through 'i'\n        for x in ml_graph[i]:\n            add_both(cl_graph, x, j)\n            # one more loop to close the transitive chain\n            for y in ml_graph[j]:\n                add_both(cl_graph, x, y)\n\n    for i in ml_graph:\n        for j in ml_graph[i]:\n            if j != i and j in cl_graph[i]:\n                # raise exception if a pair is both must-link and cannot-link\n                raise Exception(\"inconsistent constraints between %d and %d\" % (i, j))\n\n    return ml_graph, cl_graph\n\n\ndef read_data(datafile):\n    import numpy as np\n\n    # read data as a list\n    data = []\n    with open(datafile, \"r\") as f:\n        for line in f:\n            line = line.strip()\n            if line != \"\":\n                # split each line of data in the datafile into number of columns\n                d = [float(i) for i in line.split()]\n                data.append(d)\n    # return a numpy array\n    return np.array(data)\n\n\ndef read_constraints(consfile):\n    # initialize must-link and cannot-link\n    ml, cl = [], []\n    
with open(consfile, \"r\") as f:\n for line in f:\n line = line.strip()\n if line != \"\":\n line = line.split()\n # datapoints in the first 2 columns of th file\n constraint = (int(line[0]), int(line[1]))\n # ml or cl relation in the 3rd column of the file\n c = int(line[2])\n if c == 1:\n ml.append(constraint)\n if c == -1:\n cl.append(constraint)\n return ml, cl\n\n\ndef run(datafile, consfile, k, n_rep, max_iter, tolerance):\n import pandas as pd\n import numpy as np\n\n # read data file as numpy array\n data = read_data(datafile)\n print(\"Data File read successfully!\")\n # read constraints file and classify ml and cl\n ml, cl = read_constraints(consfile)\n print(\"Constraints File read successfully!\")\n # Initialize clusters and Scores\n best_clusters = None\n best_score = None\n # n_rep - corresponds to CrossValidation(CV - default 10)\n for _ in range(n_rep):\n # calls cop-kmeans fucntion declared above\n clusters, centers = cop_kmeans(\n data, k, ml, cl, max_iter=max_iter, tol=tolerance\n )\n if clusters is not None and centers is not None:\n # Sum of squares from all the points to their respective clusters\n score = sum(\n l2_distance(data[j], centers[clusters[j]]) for j in range(len(data))\n )\n if best_score is None or score < best_score:\n # update if the new score is better than the last score\n best_score = score\n best_clusters = clusters\n # return final clusters\n return best_clusters\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.setrecursionlimit(10 ** 6)\n\n clusters = run(\"./data.txt\", \"./constraints_file_0.01.csv\", 3, 10, 500, 0.0001)\n ofile = \"./output.txt\"\n if ofile is not None and clusters is not None:\n with open(ofile, \"w\") as f:\n for cluster in clusters:\n f.write(\"%d\\n\" % cluster)\n\n if not clusters:\n print(\"No solution was found!\")\n else:\n print(\" \".join(str(c) for c in clusters))","sub_path":"copkmeans.py","file_name":"copkmeans.py","file_ext":"py","file_size_in_byte":11717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"484198504","text":"\"\"\"Class to interface with the SPI Rack Qutech Delft.\"\"\"\nfrom qblox_instruments import SpiRack\nfrom qibo.config import log, raise_error\n\nfrom qibolab.instruments.abstract import Instrument, InstrumentException\n\n\nclass SPI(Instrument):\n property_wrapper = lambda parent, device, *parameter: property(\n lambda self: device.get(parameter[0]),\n lambda self, x: parent._set_device_parameter(device, *parameter, value=x),\n )\n\n def __init__(self, name, address):\n super().__init__(name, address)\n self.device: SpiRack = None\n self.s4g_modules_settings = {}\n self.d5a_modules_settings = {}\n self.dacs = {}\n self.device_parameters = {}\n\n def connect(self):\n \"\"\"Connects to the instrument using the IP address set in the runcard.\"\"\"\n if not self.is_connected:\n for attempt in range(3):\n try:\n self.device = SpiRack(self.name, self.address)\n self.is_connected = True\n break\n except KeyError as exc:\n log.info(f\"Unable to connect:\\n{str(exc)}\\nRetrying...\")\n self.name += \"_\" + str(attempt)\n except Exception as exc:\n log.info(f\"Unable to connect:\\n{str(exc)}\\nRetrying...\")\n if not self.is_connected:\n raise InstrumentException(self, f\"Unable to connect to {self.name}\")\n else:\n raise_error(Exception, \"There is an open connection to the instrument already\")\n\n def _set_device_parameter(self, target, *parameters, value):\n if self.is_connected:\n key = target.name + \".\" + parameters[0]\n if not key in 
self.device_parameters:\n                for parameter in parameters:\n                    if not hasattr(target, parameter):\n                        raise Exception(f\"The instrument {self.name} does not have parameters {parameter}\")\n                    target.set(parameter, value)\n                self.device_parameters[key] = value\n            elif self.device_parameters[key] != value:\n                for parameter in parameters:\n                    target.set(parameter, value)\n                self.device_parameters[key] = value\n        else:\n            raise Exception(f\"There is no connection to the instrument {self.name}\")\n\n    def setup(self, **kwargs):\n        # Init S4g and D5a modules in SPI mapped on runcard\n        if self.is_connected:\n            # TODO: Check data format from yml\n            # Make d5g modules optional in runcard\n            # Define span values in setup\n            # Implement parameters cache\n            # export current / voltage properties (and make them sweepable)\n            if \"s4g_modules\" in kwargs:\n                self.s4g_modules_settings = kwargs[\"s4g_modules\"]\n            if \"d5a_modules\" in kwargs:\n                self.d5a_modules_settings = kwargs[\"d5a_modules\"]\n\n            for channel, settings in self.s4g_modules_settings.items():\n                module_number = settings[0]\n                port_number = settings[1]\n                module_name = f\"S4g_module{module_number}\"\n                current = settings[2]\n                if module_name not in self.device.instrument_modules:\n                    self.device.add_spi_module(settings[0], \"S4g\", module_name)\n                device = self.device.instrument_modules[module_name].instrument_modules[\"dac\" + str(port_number - 1)]\n                self.dacs[channel] = type(\n                    \"S4g_dac\",\n                    (),\n                    {\n                        \"current\": self.property_wrapper(device, \"current\"),\n                        \"device\": device,\n                    },\n                )()\n                self.dacs[channel].device.span(\"range_min_bi\")\n                # self.dacs[channel].current = current\n\n            for channel, settings in self.d5a_modules_settings.items():\n                module_number = settings[0]\n                port_number = settings[1]\n                module_name = f\"D5a_module{module_number}\"\n                voltage = settings[2]\n                if module_name not in self.device.instrument_modules:\n                    self.device.add_spi_module(settings[0], \"D5a\", module_name)\n                device = self.device.instrument_modules[module_name].instrument_modules[\"dac\" + str(port_number - 1)]\n                self.dacs[channel] = type(\n                    \"D5a_dac\",\n                    (),\n                    {\n                        \"voltage\": self.property_wrapper(device, \"voltage\"),\n                        \"device\": device,\n                    },\n                )()\n                self.dacs[channel].device.span(\"range_min_bi\")\n                # self.dacs[channel].voltage = voltage\n        else:\n            raise_error(Exception, \"There is no connection to the instrument\")\n\n    def set_SPI_DACS_to_cero(self):\n        self.device.set_dacs_zero()\n\n    def get_SPI_IDN(self):\n        return self.device.IDN()\n\n    def get_SPI_temperature(self):\n        return self.device.temperature()\n\n    def get_SPI_battery_voltage(self):\n        return self.device.battery_voltages()\n\n    def disconnect(self):\n        if self.is_connected:\n            self.is_connected = False\n\n    def close(self):\n        if self.is_connected:\n            self.device.close()\n            self.is_connected = False\n\n    def start(self):\n        # Set the dacs to the values stored for each qubit in the runcard\n        if self.is_connected:\n            for channel, settings in self.s4g_modules_settings.items():\n                current = settings[2]\n                # Check the module's present current and warn before changing it\n                if abs(self.dacs[channel].current) > 0.010:\n                    log.info(\n                        f\"WARNING: S4g module {settings[0]} - port {settings[1]} current was: {self.dacs[channel].current}, now setting current to: {current}\"\n                    )\n                self.dacs[channel].current = current\n\n            for channel, settings in self.d5a_modules_settings.items():\n                voltage = settings[2]\n                # Check the module's present voltage and warn before changing it\n                if abs(self.dacs[channel].voltage) > 0.010:\n                    log.info(\n                        f\"WARNING: D5a module {settings[0]} - port {settings[1]} 
voltage was: {self.dacs[channel].voltage}, now setting voltage to: {voltage}\"\n )\n self.dacs[channel].voltage = voltage\n\n def stop(self):\n # if self.is_connected:\n # self.device.set_dacs_zero()\n return\n","sub_path":"src/qibolab/instruments/qutech.py","file_name":"qutech.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"416815571","text":"'''\nUtilities for Time Series tutorials.\n\n@author: Gavin Smith (gavin.smith@nottingham.ac.uk)\n'''\n\nimport pandas as pd\nimport matplotlib.pylab as plt\nimport numpy as np\n\nimport rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\nfrom rpy2.robjects import pandas2ri\npandas2ri.activate()\n\nfrom statsmodels.tsa.stattools import adfuller\n\ndef invboxcox(y,ld):\n if ld == 0:\n return(np.exp(y))\n else:\n return(np.exp(np.log(ld*y+1)/ld))\n\n\ndef auto_arima_plot(dataseries, predict_forward):\n importr('forecast') # provides stl and it's forecast method\n r = robjects.r\n r('aa <- auto.arima') # create an alias for the auto.arima function since we can't call dot methods from r2py\n r.plot(r.forecast(r.aa(dataseries,seasonal=False),h=predict_forward))\n\n\ndef arima_forecast(model, predict_forward):\n importr('forecast') # provides stl and it's forecast method\n r = robjects.r\n out = r.forecast(model,h=predict_forward)\n return out[ [i for i,x in enumerate(out.names) if x=='mean'][0] ]\n\ndef auto_arima_determine_params(dataseries):\n importr('forecast') # provides stl and it's forecast method\n r = robjects.r\n r('aa <- auto.arima') # create an alias for the auto.arima function since we can't call dot methods from r2py\n out = r.aa(dataseries,seasonal=False)\n \n ar_p = out[6][0]\n ma_q = out[6][1]\n non_season_diff = out[6][5]\n\n \n return out, (ar_p, ma_q, non_season_diff)\n\ndef manual_arima( dataseries, ar_p, ma_q, non_season_diff ):\n importr('forecast') # provides stl and it's forecast method\n r = robjects.r\n return r.Arima(dataseries,order=np.asarray([ar_p, ma_q, non_season_diff]))\n\n\ndef stl_forecast(dataseries, freq, predict_forward, s_window = 'periodic', t_window = None, robust = True):\n importr('stlplus') # only for convenience functions for extracting components (stlplus has no forecast ability at the moment)\n importr('forecast') # provides stl and it's forecast method\n r = robjects.r\n extra_args = {}\n \n if t_window != None:\n extra_args['t_window'] = t_window\n \n out = r.forecast(r.stl(r.ts(dataseries,freq=freq), s_window=s_window, robust=robust, **extra_args), method = \"arima\", h = predict_forward)\n \n ar_p = out[1][6][0]\n ma_q = out[1][6][1]\n non_season_diff = out[1][6][5]\n \n print('ARMIA parameters used were: p {}, q {}, d {}'.format(ar_p, ma_q, non_season_diff))\n \n return out[ [i for i,x in enumerate(out.names) if x=='mean'][0] ]\n\n\ndef stl_plot_forecast(dataseries, freq, predict_forward, s_window = 'periodic', t_window = None, robust = True):\n importr('stlplus') # only for convenience functions for extracting components (stlplus has no forecast ability at the moment)\n importr('forecast') # provides stl and it's forecast method\n r = robjects.r\n extra_args = {}\n \n if t_window != None:\n extra_args['t_window'] = t_window\n \n out = r.forecast(r.stl(r.ts(dataseries,freq=freq), s_window=s_window, robust=robust, **extra_args), method = \"arima\", h = predict_forward)\n \n r.plot(out)\n\n\n\ndef stl_components( dataseries, freq, s_window = 'periodic', t_window = None, robust = True 
):\n importr('stlplus') # only for convenience functions for extracting components (stlplus has no forecast ability at the moment)\n importr('forecast') # provides stl and it's forecast method\n r = robjects.r\n extra_args = {}\n \n if t_window != None:\n extra_args['t_window'] = t_window\n \n \n fit = r.stl(r.ts(dataseries,freq=freq), s_window=s_window, robust=robust, **extra_args)\n\n seasonal = r.seasonal(fit)\n trend = r.trend(fit)\n remainder = r.remainder(fit)\n \n \n return pd.Series(seasonal,index=dataseries.index), pd.Series(trend,index=dataseries.index),pd.Series(remainder,index=dataseries.index)\n\ndef stl_plot_components( dataseries, freq, s_window = 'periodic', t_window = None, robust = True ):\n seasonal, trend, remainder = stl_components( dataseries, freq, s_window = 'periodic', t_window = None, robust = True )\n # declare plot all four subplot, share the x-axis, returns figure (f) and\n # array of axis, one for each subplot.\n f, axarr = plt.subplots(5, sharex=True)\n \n \n # .plot per axis creates the plot in each subplot (axis)\n axarr[0].plot(np.asarray(dataseries), color='blue',label='Original')\n axarr[1].plot(np.asarray(seasonal), color='red', label='Seasonal')\n axarr[2].plot(np.asarray(trend), color='black', label = 'Trend')\n axarr[3].plot(np.asarray(remainder), color='black', label = 'Remainder')\n axarr[4].plot(np.asarray(dataseries)-np.asarray(seasonal), color='black', label = 'Non-seasonal')\n \n # .legend per axis creates the legend based on the label information\n axarr[0].legend(loc='best')\n axarr[1].legend(loc='best')\n axarr[2].legend(loc='best')\n axarr[3].legend(loc='best')\n axarr[4].legend(loc='best')\n\n\n\ndef plot_rolling( timeseries, overlay = True, window = 6 ):\n rolmean = timeseries.rolling(window=window,center=False).mean()\n rolstd = timeseries.rolling(window=window,center=False).std()\n \n if overlay:\n orig = plt.plot(timeseries, color='blue',label='Original')\n mean = plt.plot(rolmean, color='red', label='Rolling Mean')\n std = plt.plot(rolstd, color='black', label = 'Rolling Std')\n plt.legend(loc='best')\n plt.title('Rolling Mean & Standard Deviation')\n else:\n f, axarr = plt.subplots(3, sharex=True)\n axarr[0].plot(timeseries, color='blue',label='Original')\n axarr[1].plot(rolmean, color='red', label='Rolling Mean')\n axarr[2].plot(rolstd, color='black', label = 'Rolling Std')\n axarr[0].legend(loc='best')\n axarr[1].legend(loc='best')\n axarr[2].legend(loc='best')\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"545348967","text":"import sys\nsys.path.append('../')\nimport time\nimport numpy as np\n\n#from IPython import embed\n\nimport torch\nimport gpflow\n\nfrom RecursiveKernel import DeepArcCosine\nfrom layers_torch import GaussLinearStandardized, ScaledRelu\nfrom ess_torch import ESS\nfrom hmc_torch import HMC\n\nimport defaults\n\ndef get_kernel(input_dim,depth):\n kern = DeepArcCosine(input_dim = input_dim, num_steps = depth, variance = defaults.weight_variance, bias_variance = defaults.bias_variance ) + gpflow.kernels.White(input_dim,variance=1e-6)\n return kern\n\ndef get_gp_model(X,Y, input_dim, depth):\n kern = get_kernel( input_dim, depth )\n model = gpflow.gpr.GPR(X,Y, kern = kern )\n model.likelihood.variance= defaults.noise_variance\n return model\n\ndef get_intermediate_layers(H, num_layers, bias):\n intermediate_layers = []\n for layer_index in range(num_layers-1):\n 
intermediate_layers+= [GaussLinearStandardized(H, H, bias=bias, raw_weight_variance = defaults.weight_variance, raw_bias_variance = defaults.bias_variance),\n ScaledRelu() ]\n return intermediate_layers\n \ndef get_nn_model(D_IN,H,D_OUT, num_layers):\n intermediate_layers = get_intermediate_layers(H, num_layers, True)\n model = torch.nn.Sequential(\n GaussLinearStandardized(D_IN, H, bias=True, raw_weight_variance = defaults.weight_variance, raw_bias_variance = defaults.bias_variance),\n ScaledRelu(),\n *intermediate_layers,\n GaussLinearStandardized(H, D_OUT, bias=True, raw_weight_variance = defaults.weight_variance, raw_bias_variance = defaults.bias_variance)\n )\n return model \n\ndef draw_from_gp_prior(rng=np.random.RandomState(1)):\n grid_points = get_grid()\n kern = get_kernel()\n K = kern.compute_K_symm( grid_points ) \n L = np.linalg.cholesky(K)\n standard_normal = rng.randn( grid_points.shape[0] )\n sample = np.dot(L, standard_normal )\n return sample\n\ndef draw_sample_from_nn_prior(model, grid_points):\n pred = model(grid_points)\n return pred\n\ndef nn_model_regression(X,Y, test_X, model, num_samples, burn_in, epsilon, beta, leap_frog_iters ):\n test_size = test_X.size()[0]\n\n #run sampler\n #at the same time mantain online estimated of mean\n #and marginal variance\n #this stops us having to store large numbers of samples.\n \n num_points = 0\n online_mean = np.zeros(test_size) \n online_squares = np.zeros(test_size)\n criterion = torch.nn.MSELoss(size_average=False)\n \n sampler = HMC(model.parameters(), np.random.RandomState(1), epsilon = epsilon , beta = beta, leap_frog_iters = leap_frog_iters )\n samplerB = ESS(model.parameters(), np.random.RandomState(2) )\n \n energies = np.zeros(num_samples)\n \n start_time = time.time()\n for sample_index in range(num_samples):\n def closure():\n sampler.zero_grad()\n pred = model( X )\n energy = 0.5*criterion(pred, Y )/ defaults.noise_variance\n energy.backward()\n return energy\n sampler.step( closure )\n \n def closureB():\n pred = model( X )\n energy = 0.5*criterion(pred, Y )/ defaults.noise_variance\n return energy \n energies[sample_index] = samplerB.step( closureB )\n \n if sample_index > burn_in:\n #get prediction\n pred = model(test_X).data.cpu().numpy().flatten()\n \n #do online updates.\n num_points+=1\n delta = pred-online_mean\n online_mean = online_mean + delta/num_points\n delta2 = pred-online_mean\n online_squares = online_squares + delta * delta2\n end_time = time.time()\n print('Total time' , end_time - start_time )\n print('iterations per second', num_samples*1./(end_time - start_time))\n \n #embed()\n return online_mean, online_squares / (num_points + 1) \n\ndef xor_data_np():\n X = np.array( [[1. , 1.] , [1.,-1], [-1.,1.], [-1., -1] ], np.float32 )\n Y = np.array( [ [-1.], [1.], [1.], [-1.] 
], np.float32 )\n    return X, Y\n","sub_path":"experiments/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"231959039","text":"import matplotlib.image as mpimg\nimport scipy.sparse\nfrom scipy.sparse.linalg import lsmr\n\nbase = mpimg.imread('baseball.png')\nfoot = mpimg.imread('football.png')\nw,h = len(foot[0]), len(foot)\nox,oy = 100, 60 # glue the football here\n\nA = scipy.sparse.lil_matrix((2*w+2*h + 2*(w-1)*(h-1), w*h))\nfor i in range(0,w):\n    A[ i, i ] = 1 # top data fitting\n    A[w+i, i+(h-1)*w] = 1 # bottom data fitting\nfor j in range(0,h):\n    A[2*w +j, j*w] = 1 # left data fitting\n    A[2*w+h+j, w-1+j*w] = 1 # right data fitting\ncnt = 2*w+2*h\nfor j in range(0,h-1): # gradient matrix\n    for i in range(0,w-1):\n        A[cnt, i + j*w] = -1\n        A[cnt, i+1 + j*w] = 1\n        A[cnt+1, i + j *w] = -1\n        A[cnt+1, i + (j+1)*w] = 1\n        cnt += 2\nA = A.tocsc() # sparse column matrix for fast matrix-vector multiplication\n\nfor channel in range(3):\n    b = A.dot(foot[:,:,channel].flatten()) # fill the gradient part of the r.h.s.\n    b[0:w] = base[oy,ox:ox+w,channel] # top data fitting\n    b[w:2*w] = base[oy+h,ox:ox+w,channel] # bottom data fitting\n    b[2*w :2*w+h] = base[oy:oy+h, ox, channel] # left data fitting\n    b[2*w+h:2*w+2*h] = base[oy:oy+h, ox+w, channel] # right data fitting\n\n    x = lsmr(A, b)[0] # call the least squares solver\n    x[x > 1] = 1\n    x[x < 0] = 0\n    base[oy:oy+h,ox:ox+w, channel] = x.reshape((h, w)) # glue the football\nmpimg.imsave('result.png', base)\n","sub_path":"src/ch6/2-poisson/poisson-2d.py","file_name":"poisson-2d.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"382149264","text":"\"\"\"Constants for Dyson Python library.\"\"\"\nfrom enum import Enum, auto\n\nDEVICE_TYPE_360_EYE = \"N223\"\nDEVICE_TYPE_PURE_COOL_LINK_TOWER = \"475\"\nDEVICE_TYPE_PURE_COOL_LINK_DESK = \"469\"\n\nENVIRONMENTAL_OFF = -1\nENVIRONMENTAL_INIT = -2\n\n\nclass MessageType(Enum):\n    \"\"\"Update message type.\"\"\"\n\n    STATE = auto()\n    ENVIRONMENTAL = auto()\n\n\nclass FanMode(Enum):\n    \"\"\"Fan mode.\"\"\"\n\n    OFF = \"OFF\"\n    FAN = \"FAN\"\n    AUTO = \"AUTO\"\n\n\nclass FanSpeed(Enum):\n    \"\"\"Fan Speed.\"\"\"\n\n    SPEED_1 = \"0001\"\n    SPEED_2 = \"0002\"\n    SPEED_3 = \"0003\"\n    SPEED_4 = \"0004\"\n    SPEED_5 = \"0005\"\n    SPEED_6 = \"0006\"\n    SPEED_7 = \"0007\"\n    SPEED_8 = \"0008\"\n    SPEED_9 = \"0009\"\n    SPEED_10 = \"0010\"\n    SPEED_AUTO = \"AUTO\"\n\n\nclass AirQualityTarget(Enum):\n    \"\"\"Air Quality Target.\"\"\"\n\n    NORMAL = \"0004\"\n    HIGH = \"0003\"\n    DEFAULT = \"0002\"\n    BETTER = \"0001\"\n","sub_path":"libdyson/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"158000837","text":"# -*- coding: utf-8 -*-\n# @Time    : 2019-01-17 5:10 PM\n# @Author  : ado\n# @version : 1.00\n\n\ndef longest_substring(s: str) -> int:\n    \"\"\"\n    Longest substring without repeating characters\n    :param s: like 'anviaj'\n    :return: length of the longest such substring\n    \"\"\"\n    s_len = len(s)\n    if s_len == 0:\n        return 0\n\n    tmp = ''\n    max_len = 0\n    # maximum possible length (number of distinct characters)\n    distinct_alpha = len(set(s))\n\n    for i in range(s_len):\n        ch = s[i]\n        tmp_len = len(tmp)\n        if ch in tmp:\n            # check whether the current length beats the previous best\n            if tmp_len > max_len:\n                max_len = tmp_len\n            # check whether the maximum possible length has already been reached\n            if max_len == distinct_alpha:\n                return max_len\n            # reassign tmp\n            if tmp.index(ch) + 1 >= tmp_len:\n                # 
here ch is the last character of tmp\n                tmp = ch\n            else:\n                tmp = tmp[tmp.index(ch) + 1:] + ch\n        else:\n            tmp += ch\n    # e.g. 'au': the for-loop above ends without max_len being assigned\n    if len(tmp) > max_len:\n        max_len = len(tmp)\n\n    return max_len\n\n\nif __name__ == '__main__':\n    print(longest_substring('anviaj'))\n","sub_path":"string/3_longest_substring.py","file_name":"3_longest_substring.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"301263961","text":"\"\"\"empty message\n\nRevision ID: 5f55469920be\nRevises: 73ff69eeb209\nCreate Date: 2021-04-13 15:55:40.467278\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '5f55469920be'\ndown_revision = '73ff69eeb209'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('access',\n    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),\n    sa.Column('data', sa.Date(), nullable=True),\n    sa.Column('hora', sa.Integer(), nullable=True),\n    sa.Column('access', sa.Integer(), nullable=True),\n    sa.Column('pagina', sa.String(length=100), nullable=True),\n    sa.PrimaryKeyConstraint('id')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('access')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/5f55469920be_.py","file_name":"5f55469920be_.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"621880018","text":"#Project Members\n\nprint(\"\\nUbitName1: nshokeen\")\nprint(\"personNumber1: 50247681\")\n\n\nprint(\"\\nUbitName2: mmaddu\")\nprint(\"personNumber2: 50246769\")\n\n\nprint(\"\\nUbitName3: csudhars\")\nprint(\"personNumber3: 50245956\")\n\n\n#Importing Required Libraries\n\nimport scipy as scipy\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pandas.tools.plotting import scatter_matrix\nfrom scipy import stats, integrate\nimport seaborn as sns\n\n#Creating a data frame from the given data\n\ndf = pd.read_excel('/Users/mahalakshmimaddu/Desktop/IML/DataSet3/universitydata.xlsx', sheetname = 'university_data')\n\n#Creating a data frame for the first four columns of the data\n\ndf1 = pd.DataFrame(df,columns=['CS Score (USNews)','Research Overhead %','Admin Base Pay$','Tuition(out-state)$'])\n\n\n\n#Calculating Mean, Variance and Standard Deviation for the first four columns\n\nmu1 = df['CS Score (USNews)'].mean()\nmu2 = df['Research Overhead %'].mean()\nmu3 = df['Admin Base Pay$'].mean()\nmu4 = df['Tuition(out-state)$'].mean()\n\n\n\nvar1 = df['CS Score (USNews)'].var()\nvar2 = df['Research Overhead %'].var()\nvar3 = df['Admin Base Pay$'].var()\nvar4 = df['Tuition(out-state)$'].var()\n\n\n\nsigma1 = df['CS Score (USNews)'].std()\nsigma2 = df['Research Overhead %'].std()\nsigma3 = df['Admin Base Pay$'].std()\nsigma4 = df['Tuition(out-state)$'].std()\n\n\n\n#Finding Correlation and Covariance Matrix\n\ncovarianceMat = cov = df1.cov()\ncorrelationMat = corr = df1.corr()\n\n\n\n\n#Printing the values\n\nprint (\"\\nmu1 = \"+str(round(mu1,3)))\nprint (\"mu2 = \"+str(round(mu2,3)))\nprint (\"mu3 = \"+str(round(mu3,3)))\nprint (\"mu4 = \"+str(round(mu4,3)))\n\nprint (\"\\nvar1 = \"+str(round(var1,3)))\nprint (\"var2 = \"+str(round(var2,3)))\nprint (\"var3 = \"+str(round(var3,3)))\nprint 
(\"var4 = \"+str(round(var4,3)))\n\nprint (\"\\nsigma1 = \"+str(round(sigma1,3)))\nprint (\"sigma2 = \"+str(round(sigma2,3)))\nprint (\"sigma3 = \"+str(round(sigma3,3)))\nprint (\"sigma4 = \"+str(round(sigma4,3)))\n\nprint(\"\\n\")\n\nprint(\"covarianceMat = \\n%s\" %covarianceMat)\n\nprint(\"\\n\")\n\nprint(\"correlationMat = \\n%s\" %covarianceMat)\n\nprint(\"\\n\")\n\n\n\n\n#Finding the loglikelihood (Univariate)\n\nimport math\n\nmul1 = 1\nfor row_idx1 in range(0,np1.size - 1):\n mul1 = mul1 * stats.norm.pdf(np1[row_idx1],mu1,sigma1)\n\nmul2 = 1\nfor row_idx2 in range(0,np2.size - 1):\n mul2 = mul2 * stats.norm.pdf(np2[row_idx2],mu2,sigma2)\n\nmul3 = 1\nfor row_idx3 in range(0,np3.size - 1):\n mul3 = mul3 * stats.norm.pdf(np3[row_idx3],mu3,sigma3)\n \nmul4 = 1\nfor row_idx4 in range(0,np4.size - 1):\n mul4 = mul4 * stats.norm.pdf(np4[row_idx4],mu4,sigma4)\n \n \ntotmul = mul1 * mul2 * mul3 * mul4\n\nlogLikelihood1 = math.log(mul1) + math.log(mul2) + math.log(mul3) + math.log(mul4)\n\nprint(\"\\nlogLikelihood(Univariate) = %s\" %logLikelihood1)\n\n\n#Finding the loglikelihood (Multivariate)\n\n\nfrom scipy.stats import multivariate_normal as mvnorm\n\nmultiVariate = 0\n\nmu_Vector = [round(mu1,3),round(mu2,3),round(mu3,3),round(mu4,3)]\n\nfor itr in range(0,49):\n multiVariate = multiVariate + math.log(scipy.stats.multivariate_normal.pdf(df1.iloc[itr,:],mu_Vector,cov,allow_singular=True))\n\nprint(\"\\nlogLikelihood (Multivariate) = %s\" % multiVariate)\n\n\n#converting dataframe into matrix\nnp1 = df1.as_matrix(columns=df1.columns[0:1])\nnp2 = df1.as_matrix(columns=df1.columns[1:2])\nnp3 = df1.as_matrix(columns=df1.columns[2:3])\nnp4 = df1.as_matrix(columns=df1.columns[3:4])\n\n\nprint(\"\\n\\n\\nQuestion2 :Scatter Plots\\n\")\n\n\n#Pairwise Scatter Plots \n\nprint(\"\\n\")\nprint(\"Scatter Plot for CS Score (USNews) VS Research Overhead %\")\nsns.regplot(x=\"CS Score (USNews)\", y=\"Research Overhead %\", data=df1)\nplt.show()\nprint(\"\\n\")\nprint(\"Scatter Plot for CS Score (USNews) VS Admin Base Pay$\")\nsns.regplot(x=\"CS Score (USNews)\", y=\"Admin Base Pay$\", data=df)\nplt.show()\nprint(\"\\n\")\nprint(\"Scatter Plot for CS Score (USNews) % VS Tuition(out-state)$\")\nsns.regplot(x=\"CS Score (USNews)\", y=\"Tuition(out-state)$\", data=df1)\nplt.show()\nprint(\"\\n\")\nprint(\"Scatter Plot for Research Overhead % VS Admin Base Pay$\")\nsns.regplot(x=\"Research Overhead %\", y=\"Admin Base Pay$\", data=df1)\nplt.show() \nprint(\"\\n\")\nprint(\"Scatter Plot for Research Overhead % VS Tuition(out-state)$\")\nsns.regplot(x=\"Research Overhead %\", y=\"Tuition(out-state)$\", data=df1)\nplt.show()\nprint(\"\\n\")\nprint(\"Scatter Plot for Admin Base Pay$ VS Tuition(out-state)$\")\nsns.regplot(x=\"Admin Base Pay$\", y=\"Tuition(out-state)$\", data=df1)\nplt.show()\nprint(\"\\n\")\n\n\n\n#Creating Scatter matrix for Correlation\n\n\n\nfig = plt.figure()\nx1 = fig.add_subplot(111)\nx2 = x1.matshow(corr,vmin = -1,vmax = 1)\nfig.colorbar(x2)\n\n\nx3 = ['CS Score','Research Overhead %','Admin Base Pay$','Tuition(out-state)$']\n\nx1.set_xticklabels(['']+x3)\nx1.set_yticklabels(['']+x3)\n\n\n\nprint(\"\\nScatter Matrix for Correlation Matrix:\\n\")\nplt.show()\nprint(\"\\n\")\n\n\n\n\n\n\n","sub_path":"source_code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"35355922","text":"\n\nimport math\nimport 
numpy\n\n############################################################\n### Model 1: Use the average speed and angle in a window ###\n############################################################\n\nclass NaivePredictor:\n \"\"\"\n The naive predictor class.\n This predictor takes the training data and a prediction horizon:\n (1) compute the boundary of the box\n (2) for each point in the prediction horizon\n (2.1) takes prior 20 frames.\n (2.2) compute the average velocity, angle, and turning angle in the last 20 frames.\n (2.3) predict the point.\n (2.4) add the predicited point to data (for future prediction)\n (2.3) prediction step consists of 2 steps: (1) predict the point basd on the robot's current position,\n current angle, and current velocity; (2) If the robot is going to hit the wall, bounce the\n robot back based on law of refleciton (incident angle = reflecting angle).\n \"\"\"\n\n # initialize the predictor\n def __init__(self, window_size, horizon):\n self.window_size = window_size\n self.horizon = horizon\n\n # compute the average velocity in the past window of data\n def compute_velocity(self, past_data):\n velocities = []\n for i in range(0, len(past_data) - 1):\n velocities.append(math.sqrt((past_data[i][0] - past_data[i+1][0])**2 +\\\n (past_data[i][1] - past_data[i+1][1])**2))\n return numpy.mean(velocities)\n\n # compute the average turning angle in the past window of data\n def compute_truning_angle(self, past_data):\n turning_angles = []\n past_angle = math.atan2(past_data[0][0] - past_data[1][0], past_data[0][1] - past_data[1][1])\n for i in range(1, len(past_data) - 1):\n current_angle = math.atan2(past_data[i+1][0] - past_data[i][0], past_data[i+1][1] - past_data[i][1])\n turning_angles.append(current_angle - past_angle)\n past_angle = current_angle\n return numpy.mean(turning_angles)\n\n # compute the last angle the car goes in last frame\n def compute_angle(self, past_data):\n x = past_data[-1]\n y = past_data[-2]\n return math.atan2(x[0] - y[0], x[1] - y[1])\n\n def next_position(self, distance, angle, turning_angle, prev_position):\n delta_x = distance * math.sin(angle + turning_angle)\n delta_y = distance * math.cos(angle + turning_angle)\n return [int(prev_position[0] + delta_x), int(prev_position[1] + delta_y)]\n\n # Check whether we reach a boundary, if so, return\n # the new angle and turning angle\n def check_boundary(self, position, angle, boundaries):\n if position[0] < boundaries['left']:\n if angle > math.pi / 4 and angle < math.pi / 2:\n return math.pi / 2 - angle\n if angle > math.pi / 2 and angle < math.pi / 4 * 3:\n return math.pi / 2 * 3 - angle\n if position[0] > boundaries['right']:\n if angle < math.pi / 4:\n return math.pi / 2 - angle\n if angle > math.pi / 4 * 3:\n return math.pi / 2 * 3 - angle\n if position[1] < boundaries['down']:\n if angle > math.pi / 4 * 3:\n return math.pi - angle\n if angle > math.pi / 2 and angle < math.pi / 4 * 3:\n return math.pi - angle\n if position[1] > boundaries['up']:\n if angle > math.pi / 4 and angle < math.pi / 2:\n return math.pi - angle\n if angle < math.pi / 4:\n return math.pi - angle\n return 0\n\n # given past data, we predict the future n frames\n def prediction(self, prediction_horizon, window_size, data):\n if len(data) < window_size:\n return []\n past_data = data[-window_size:]\n angle = self.compute_angle(past_data)\n turning_angle = self.compute_truning_angle(past_data)\n distance = self.compute_velocity(past_data)\n predictions = []\n boundaries = {\"left\" : min([x[0] for x in data]),\\\n 
\"right\" : max([x[0] for x in data]),\\\n \"up\" : max([x[1] for x in data]),\\\n \"down\" : min([x[1] for x in data])}\n count = 0\n current_position = past_data[-1]\n for i in range(0, prediction_horizon):\n prediction = self.next_position(distance, angle, turning_angle, current_position)\n new_angle = self.check_boundary(prediction, angle, boundaries)\n if new_angle != 0 and count <= 0:\n count = 5\n angle = new_angle\n count -= 1\n predictions.append(prediction)\n current_position = prediction\n return predictions\n\n # The main interface of the funciton. This returns the prediction over the horizon\n def make_prediction(self, input_file_path):\n self.data = [[int(x) for x in line.rstrip('\\r\\n').split(',')] for line in open(input_file_path)]\n training_data = self.data[0:len(self.data)-self.horizon]\n testing_data = self.data[-self.horizon:]\n prediction_data = self.prediction(self.horizon, self.window_size, training_data)\n return [prediction_data, testing_data]","sub_path":"naive_predict.py","file_name":"naive_predict.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"574849643","text":"from xgboost import DMatrix\nimport pickle\nimport matplotlib.pyplot as plt\n\n\ndef train_valid_test(path, dmat=True):\n train, valid, test, task = pickle.load(open(path, 'rb'))\n if dmat:\n train, valid, test = DMatrix(*train), DMatrix(*valid), DMatrix(*test)\n return train, valid, test, task\n\n\ndef train_test(path='traintest', dmat=True):\n # after pickle.dump(((new_train, train_target), (eval_matrix, eval_target)), open('traintest', 'wb'))\n train, test = pickle.load(open(path, 'rb'))\n if dmat:\n train, test = DMatrix(*train), DMatrix(*test)\n return train, test\n\n\ndef plot_comparison(callbacks):\n plt.figure(figsize=(16,9))\n try:\n for callback in callbacks:\n callback.log.plot(ax=plt.gca())\n except TypeError:\n callbacks.log.plot(ax=plt.gca())\n plt.legend()\n plt.show()\n\n\nfrom collections import OrderedDict, Callable\n\n\nclass OrderedDefaultDict(OrderedDict):\n # Source: http://stackoverflow.com/a/6190500/562769\n def __init__(self, default_factory=None, *a, **kw):\n if (default_factory is not None and\n not isinstance(default_factory, Callable)):\n raise TypeError('first argument must be callable')\n OrderedDict.__init__(self, *a, **kw)\n self.default_factory = default_factory\n\n def __getitem__(self, key):\n try:\n return OrderedDict.__getitem__(self, key)\n except KeyError:\n return self.__missing__(key)\n\n def __missing__(self, key):\n if self.default_factory is None:\n raise KeyError(key)\n self[key] = value = self.default_factory()\n return value\n\n def __reduce__(self):\n if self.default_factory is None:\n args = tuple()\n else:\n args = self.default_factory,\n return type(self), args, None, None, self.items()\n\n def copy(self):\n return self.__copy__()\n\n def __copy__(self):\n return type(self)(self.default_factory, self)\n\n def __deepcopy__(self, memo):\n import copy\n return type(self)(self.default_factory,\n copy.deepcopy(self.items()))\n\n def __repr__(self):\n return 'OrderedDefaultDict(%s, %s)' % (self.default_factory,\n OrderedDict.__repr__(self))","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"78512282","text":"# Author: WangYue\n# Data: 2018/9/1 17:42\n\"\"\"\n Description:\n 条件表达式(3元操作符号)\n\"\"\"\nx = 4\n\ny = 
7\n\nif x < y:\n\tsmall = x\nelse:\n\tsmall = y\n\nprint(small)\n\nbig = x if x > y else y\nprint(big)\n","sub_path":"START_01/learn_01.py","file_name":"learn_01.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"353253781","text":"from unittest import TestCase\n\nfrom vesper.old_bird.old_bird_detector_redux_1_1 import _TransientFinder\n\n\n_MIN_LENGTH = 100\n_MAX_LENGTH = 400\n_FINAL_FALL = (1000000, False)\n\n\nclass TransientFinderTests(TestCase):\n\n\n def test(self):\n \n cases = [\n \n # no transitions, no transients\n ([], []),\n \n # falls only, no transients\n ([(1000, False)], []),\n ([(1000, False), (1100, False)], []),\n \n # one transient, length less than minimum\n ([(1000, True), (1001, False)], [(1000, 100)]),\n ([(1000, True), (1050, False)], [(1000, 100)]),\n ([(1000, True), (1099, False)], [(1000, 100)]),\n \n # one transient, minimum length\n ([(1000, True), (1100, False)], [(1000, 100)]),\n \n # one transient, length between minimum and maximum\n ([(1000, True), (1200, False)], [(1000, 200)]),\n \n # one transient, maximum length\n ([(1000, True), (1400, False)], [(1000, 400)]),\n \n # one transient, length greater than maximum\n ([(1000, True), (1401, False)], [(1000, 400)]),\n ([(1000, True), (1500, False)], [(1000, 400)]),\n \n # two transients\n ([(1000, True), (1200, False), (1400, True), (1600, False)],\n [(1000, 200), (1400, 200)]),\n \n # two closely spaced transients\n ([(1000, True), (1200, False), (1201, True), (1401, False)],\n [(1000, 200), (1201, 200)]),\n \n # one transient preceded by fall\n ([(500, False), (1000, True), (1200, False)], [(1000, 200)]),\n \n # two consecutive rises separated by less than maximum length\n ([(1000, True), (1100, True), (1200, False)], [(1000, 200)]),\n ([(1000, True), (1399, True)], [(1000, 400)]),\n \n # two consecutive rises separated by exactly maximum length\n # (the second rise is ignored)\n ([(1000, True), (1400, True)], [(1000, 400)]),\n \n # two consecutive rises separated by more than maximum length\n ([(1000, True), (1401, True)], [(1000, 400), (1401, 400)]),\n ([(1000, True), (2000, True), (3000, False)],\n [(1000, 400), (2000, 400)]),\n \n # rise after transient of less than minimum length, not more\n # than one sample past end of minimum length transient\n ([(1000, True), (1010, False), (1020, True)], [(1000, 400)]),\n ([(1000, True), (1010, False), (1099, True)], [(1000, 400)]),\n ([(1000, True), (1010, False), (1100, True)], [(1000, 400)]),\n \n # rise after transient of less than minimum length, more than\n # one sample past end of minimum length transient\n ([(1000, True), (1010, False), (1101, True)],\n [(1000, 100), (1101, 400)]),\n \n # fall after transient of less than minimum length, before\n # end of minimum length transient\n ([(1000, True), (1010, False), (1020, False)], [(1000, 100)]),\n \n ]\n \n for crossings, expected_clips in cases:\n \n # Pass case crossings all at once.\n finder = _TransientFinder(_MIN_LENGTH, _MAX_LENGTH)\n clips = finder.process(crossings)\n clips += finder.complete_processing([_FINAL_FALL])\n self.assertEqual(clips, expected_clips)\n \n # Pass case crossings one at a time.\n finder = _TransientFinder(_MIN_LENGTH, _MAX_LENGTH)\n clips = []\n for crossing in crossings:\n clips += finder.process([crossing])\n clips += finder.complete_processing([_FINAL_FALL])\n self.assertEqual(clips, 
expected_clips)\n","sub_path":"vesper/old_bird/tests/test_old_bird_detector_redux_1_1.py","file_name":"test_old_bird_detector_redux_1_1.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"151873388","text":"import keras\nimport tensorflow as tf\nimport os\nfrom os.path import join\nimport json\nimport random\nimport itertools\nimport re\nimport datetime\nfrom collections import Counter\n#import cairocffi as cairo\n#import editdistance\n\nimport keras.callbacks\nfrom keras import backend as K\nfrom keras.models import load_model\n\nimport numpy as np\nfrom scipy import ndimage\nimport pylab\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport cv2\n\nfrom utils import *\n\ntrain_dir = os.path.join('data','anpr_ocr__train')\ntest_dir = os.path.join('data','anpr_ocr__test')\n\nc_val = load_json(train_dir, 'val')\nc_train = load_json(train_dir, 'train')\n\nletters_train = set(c_train.keys())\nletters_val = set(c_val.keys())\n\nif letters_train == letters_val:\n print('Letters in train and val do match')\nelse:\n raise Exception()\n# print(len(letters_train), len(letters_val), len(letters_val | letters_train))\nletters = sorted(list(letters_train))\nprint('Letters:', ' '.join(letters))\n\nsess = tf.Session()\nK.set_session(sess)\n\nplate_images = TextImageGenerator(train_dir, 'val', 128, 64, 8, 4, letters)\nplate_images.build_data()\n\nif os.path.exists('model_1.h5'):\n print('True')\n model = train(128, train_dir, letters, load=True)\nelse:\n model = train(128, train_dir, letters, load=False)\n model.save('model_1.h5')\n\nnet_inp = model.get_layer(name='the_input').input\nnet_out = model.get_layer(name='softmax').output\n\nmodel1 = load_model('model_1.h5', compile = False)\n\n#model1.predict(cv2.imread('data\\anpr_ocr__test\\img/A007HA50.png'))\n#model1.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)\n \n\nfor inp_value, _ in plate_images.next_batch():\n bs = inp_value['the_input'].shape[0]\n X_data = inp_value['the_input']\n net_out_value = sess.run(net_out, feed_dict={net_inp:X_data})\n pred_texts = decode_batch(net_out_value, letters)\n labels = inp_value['the_labels']\n texts = []\n for label in labels:\n text = ''.join(list(map(lambda x: letters[int(x)], label)))\n texts.append(text)\n \n for i in range(bs):\n fig = plt.figure(figsize=(10, 10))\n outer = gridspec.GridSpec(2, 1, wspace=10, hspace=0.1)\n ax1 = plt.Subplot(fig, outer[0])\n fig.add_subplot(ax1)\n fig.add_subplot(ax2)\n print('Predicted: %s\\nTrue: %s' % (pred_texts[i], texts[i]))\n img = X_data[i][:, :, 0].T\n ax1.set_title('Input img')\n ax1.imshow(img, cmap='gray')\n ax1.set_xticks([])\n ax1.set_yticks([])\n #ax.axvline(x, linestyle='--', color='k')\n plt.show()\n break\n","sub_path":"Car Plate Recognition/tf_model.py","file_name":"tf_model.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"452502739","text":"#!/usr/bin/python\n\"\"\"\nPandigital products\nProblem 32\nWe shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once; for example, the 5-digit number, 15234, is 1 through 5 pandigital.\n\nThe product 7254 is unusual, as the identity, 39 x 186 = 7254, containing multiplicand, multiplier, and product is 1 through 9 pandigital.\n\nFind the sum of all products whose multiplicand/multiplier/product identity can be written as a 1 through 9 
pandigital.\n\nHINT: Some products can be obtained in more than one way so be sure to only include it once in your sum.\n\"\"\"\n\ndef isPandigital(a):\n count = 10*[0]\n while a != 0:\n if count[a%10] == 1: return False\n count[a%10] += 1\n a /= 10\n return True\n\ndef main():\n sum = 0\n q, t, w, final = [], [], [], []\n for i in range(1, 10000):\n for j in range(1, 10000):\n t = [str(i), str(j), str(i * j)]\n q = \"\".join(t)\n if isPandigital(int(q)) and len(q) == 9 and not \"0\" in str(q):\n print(i, \"\\t\", t[2])\n w.append(t[2])\n w = list(set(w))\n for i in range(len(w)):\n sum = sum + int(w[i])\n print(sum)\n\nif __name__ == \"__main__\":\n main()","sub_path":"src/main/python/projecteuler/problem032.py","file_name":"problem032.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"222460669","text":"from sys import modules\nfrom unittest import TestCase\nfrom unittest.mock import call, MagicMock, Mock, patch\n\nfrom command_handler import CommandHandler\n\n\nclass CommandHandlerTest(TestCase):\n def testCommandHandlerReturnsInstanceOfCommandHandler(self):\n app = MagicMock()\n\n self.assertIsInstance(CommandHandler(app), CommandHandler)\n\n def testInitCallsAddUrlRuleOnGivenApplication(self):\n app = MagicMock()\n CommandHandler(app)\n\n app.add_url_rule.assert_called()\n\n def testInitCallsAddUrlRuleOnGivenApplicationWithUrl(self):\n app = MagicMock()\n CommandHandler(app)\n\n args, kwargs = app.add_url_rule.call_args\n assert \"rule\" in kwargs or len(args) >= 1\n\n url = kwargs.get(\"rule\") if \"rule\" in kwargs else args[0]\n self.assertEqual(url, \"/command\")\n\n def testInitCallsAddUrlRuleOnGivenApplicationWithPrefixedUrl(self):\n app = MagicMock()\n CommandHandler(app, rulePrefix=\"/foo\")\n\n args, kwargs = app.add_url_rule.call_args\n url = kwargs.get(\"rule\") if \"rule\" in kwargs else args[0]\n self.assertEqual(url, \"/foo/command\")\n\n def testInitCallsAddUrlRuleOnGivenApplicationWithSuffixUrl(self):\n app = MagicMock()\n CommandHandler(app, ruleSuffix=\"bar\")\n\n args, kwargs = app.add_url_rule.call_args\n url = kwargs.get(\"rule\") if \"rule\" in kwargs else args[0]\n self.assertEqual(url, \"/bar\")\n\n def testInitCallsAddUrlRuleOnGivenApplicationWithViewFunc(self):\n app = MagicMock()\n\n CommandHandler(app)\n\n args, kwargs = app.add_url_rule.call_args\n assert \"view_func\" in kwargs or len(args) >= 3\n\n view_func = kwargs.get(\"view_func\") if \"view_func\" in kwargs else args[2]\n with patch(\"command_handler.views.Invoker.dispatch_request\") as dispatcher:\n view_func()\n\n dispatcher.assert_called()\n\n def testInitCallsAddUrlRuleOnGivenApplicationWithPostAsOnlyAllowedMethod(self):\n app = MagicMock()\n CommandHandler(app)\n\n args, kwargs = app.add_url_rule.call_args\n assert \"methods\" in kwargs\n\n methods = kwargs.get(\"methods\")\n self.assertIsInstance(methods, list)\n self.assertEqual(len(methods), 1)\n assert \"POST\" in methods\n\n def testInitPassesValidatorParamToInvokerInitializer(self):\n app = MagicMock()\n validators = [\"foo\", \"bar\"]\n\n CommandHandler(app, validators=validators)\n\n args, kwargs = app.add_url_rule.call_args\n view_func = kwargs.get(\"view_func\") if \"view_func\" in kwargs else args[2]\n with patch(\"command_handler.views.Invoker.dispatch_request\"):\n with patch.object(view_func, \"view_class\", Mock(\"command_handler.views.Invoker\")) as view_class:\n view_func()\n\n args, kwargs = view_class.call_args\n assert 
\"validators\" in kwargs\n self.assertListEqual(kwargs[\"validators\"], validators)\n\n def testInitCreatesDifferentViewsWithGivenNamesAndDifferentRegistries(self):\n app = MagicMock()\n\n with patch(\"command_handler.views.Invoker.as_view\") as view:\n ch1 = CommandHandler(app)\n ch2 = CommandHandler(app, rulePrefix=\"/foo\")\n ch3 = CommandHandler(app, ruleSuffix=\"bar\")\n\n view.assert_has_calls([\n call(\"__command_handler_invoker_view__command\", registry=ch1.registry),\n call(\"__command_handler_invoker_view_/foo_command\", registry=ch2.registry),\n call(\"__command_handler_invoker_view__bar\", registry=ch3.registry),\n ])\n self.assertNotEqual(ch1.registry, ch2.registry)\n self.assertNotEqual(ch2.registry, ch3.registry)\n self.assertNotEqual(ch1.registry, ch3.registry)\n\n def testAddHandlerWrappsGivenMethodWithHandlerClassAndAddsItToTheRegistry(self):\n def handler():\n pass\n\n def handlerDecorator():\n return handler()\n\n ch = CommandHandler(MagicMock())\n ch.registry = Mock()\n extras = {\n \"transformer\": lambda x: x,\n \"postProcessor\": lambda x: None,\n }\n\n with patch.object(modules[\"command_handler.handler\"], \"Handler\") as Handler:\n Handler.return_value = handlerDecorator\n ch.addHandler(handler, \"foo.bar\", {}, **extras)\n\n Handler.assert_called()\n Handler.assert_has_calls([call(handler, **extras)])\n\n ch.registry.add.assert_called()\n ch.registry.add.assert_has_calls([call(\"foo.bar\", handlerDecorator, {})])\n","sub_path":"src/tests/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"181999053","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import JsonResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom Aplicaciones.User.mixins import ValidatePermissionRequiredMixin\nfrom django.views.generic import ListView,CreateView,UpdateView,DeleteView\n\n\nfrom Aplicaciones.Producto.forms import *\nfrom Aplicaciones.Producto.models import *\nfrom Aplicaciones.User.mixins import ValidatePermissionRequiredMixin\n\n# Create your views here.\n#Listar\nclass ProductoListView(LoginRequiredMixin, ValidatePermissionRequiredMixin,ListView):\n model= Producto\n template_name='producto/list.html'\n permission_required = 'view_producto'\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n action = request.POST['action']\n if action == 'searchdata':\n data =[]\n posicion = 1#para listar en orden los numeros del id\n for i in Producto.objects.all():\n item = i.toJSON()\n item['posicion'] = posicion\n data.append(item)\n posicion += 1\n else:\n data['error'] = 'Ha ocurrido un error'\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data, safe=False)\n\n def get_context_data(self,**kwargs):\n context= super().get_context_data(**kwargs)\n context['title']='Listado de Producto'\n context['create_url']=reverse_lazy('Producto:producto_create')\n context['list_url'] = reverse_lazy('Producto:producto_list')\n context['entity']='Producto'\n return context\n\n# Crear\nclass ProductoCreateView(LoginRequiredMixin, ValidatePermissionRequiredMixin,CreateView):\n model=Producto\n form_class = ProductoForm\n template_name='producto/create.html'\n permission_required = 'add_producto'\n success_url = reverse_lazy('Producto:producto_list')\n\n #@method_decorator(csrf_exempt)\n def 
dispatch(self, request, *args, **kwargs):\n        return super().dispatch(request, *args, **kwargs)\n\n    def post(self, request, *args, **kwargs):\n        data = {}\n        try:\n            action = request.POST['action']\n            if action == 'add':\n                form = self.get_form()\n                data = form.save()\n            else:\n                data['error'] = 'Ha ocurrido un error'\n        except Exception as e:\n            data['error'] = str(e)\n        return JsonResponse(data)\n\n    def get_context_data(self,**kwargs):\n        context= super().get_context_data(**kwargs)\n        context['title']='Creación de Producto'\n        context['entity'] = 'Producto'\n        context['list_url'] = reverse_lazy('Producto:producto_list')\n        context['action'] = 'add'\n        return context\n# Editar\nclass ProductoUpdateView(LoginRequiredMixin, ValidatePermissionRequiredMixin,UpdateView):\n    model=Producto\n    form_class = ProductoForm\n    template_name='producto/create.html'\n    permission_required = 'change_producto'\n    success_url = reverse_lazy('Producto:producto_list')\n\n    def dispatch(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        return super().dispatch(request, *args, **kwargs)\n\n    def post(self, request, *args, **kwargs):\n        data = {}\n        try:\n            action = request.POST['action']\n            if action == 'edit':\n                form = self.get_form()\n                data = form.save()\n            else:\n                data['error'] = 'No ha ingresado a ninguna opción'\n        except Exception as e:\n            data['error'] = str(e)\n        return JsonResponse(data, safe=False)\n    \n    def get_context_data(self,**kwargs):\n        context= super().get_context_data(**kwargs)\n        context['title']='Editar Producto'\n        context['entity']='Producto'\n        context['list_url'] = reverse_lazy('Producto:producto_list')\n        context['action']='edit'\n        return context\n# Eliminar\nclass ProductoDeleteView(LoginRequiredMixin, ValidatePermissionRequiredMixin,DeleteView):\n    model=Producto\n    template_name='producto/delete.html'\n    permission_required = 'delete_producto'\n    success_url = reverse_lazy('Producto:producto_list')\n\n    def dispatch(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        return super().dispatch(request, *args, **kwargs)\n\n    def post(self, request, *args, **kwargs):\n        data = {}\n        try:\n            self.object.delete()\n        except Exception as e:\n            data['error'] = 'Este producto ya tiene movimiento no se puede eliminar '\n        return JsonResponse(data)\n\n    def get_context_data(self,**kwargs):\n        context= super().get_context_data(**kwargs)\n        context['title']='Eliminacion Producto'\n        context['entity']='Producto'\n        context['list_url'] = reverse_lazy('Producto:producto_list')\n        return context\n\n","sub_path":"Aplicaciones/Producto/views/producto/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"49260032","text":"# -*-coding:utf-8 -*-\n#Reference:**********************************************\n# @Time    : 2019-11-07 23:40\n# @Author  : Fabrice LI\n# @File    : 20191107_123_word_search.py\n# @User    : liyihao\n# @Software : PyCharm\n# @Description: Given a 2D board and a word, find if the word exists in the grid.\n#\n# The word can be constructed from letters of sequentially adjacent cell,\n# where \"adjacent\" cells are those horizontally or vertically neighboring.\n# The same letter cell may not be used more than once.\n#Reference:**********************************************\n'''\nE.g\nInput:[\"ABCE\",\"SFCS\",\"ADEE\"],\"ABCCED\"\nOutput:true\nExplanation:\n[\n    A B C E\n    S F C S\n    A D E E\n]\n(0,0)A->(0,1)B->(0,2)C->(1,2)C->(2,2)E->(2,1)D\n\nInput:[\"z\"],\"z\"\nOutput:true\nExplanation:\n[ z 
]\n(0,0)z\n'''\nclass Solution:\n \"\"\"\n @param board: A list of lists of character\n @param word: A string\n @return: A boolean\n \"\"\"\n # 方向数组\n direction = [(0, -1), (0, 1), (1, 0), (-1, 0)]\n def exist(self, board, word):\n row = len(board)\n if not row:\n return False\n column = len(board[0])\n dp = [[0 for _ in range(column)] for _ in range(row)]\n for r in range(row):\n for c in range(column):\n if self.found_word(board, word, 0, r, c, dp, row, column):\n return True\n return False\n\n def found_word(self, board, word, index, cur_row, cur_column, dp, row, column):\n if index == len(word) - 1:\n return board[cur_row][cur_column] == word[index]\n if board[cur_row][cur_column] == word[index]:\n dp[cur_row][cur_column] = 1\n for d in self.direction:\n new_row = cur_row + d[0]\n new_column = cur_column + d[1]\n if 0 <= new_row < row \\\n and 0 <= new_column < column \\\n and not dp[new_row][new_column] \\\n and self.found_word(board, word, index + 1, new_row, new_column, dp, row, column):\n return True\n dp[cur_row][cur_column] = 0\n return False\n\n\nif __name__ == '__main__':\n s = Solution()\n board = [\"ABCE\", \"SFCS\", \"ADEE\"]\n word = \"ABCCED\"\n print(s.exist(board, word))\n","sub_path":"LintCode/DFS/20191107_123_word_search.py","file_name":"20191107_123_word_search.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"122478770","text":"\"\"\"A util script used on daryl's laptop to switch 'iemdb' /etc/hosts entry\n\n129.186.185.33 iemdb iemdb2\n#127.0.0.1 iemdb iemdb2\n\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport tempfile\nimport os\n\nMETVM4, METVM5, METVM6 = range(3)\nIPS = ['172.16.170.1', '172.16.171.1', '172.16.172.1']\nLOOKUP = {\n \"\": IPS[METVM6],\n \"-awos\": IPS[METVM4],\n \"-hads\": IPS[METVM4],\n \"-iemre\": IPS[METVM6],\n \"-mos\": IPS[METVM5],\n \"-nldn\": IPS[METVM5],\n \"-radar\": IPS[METVM5],\n \"-smos\": IPS[METVM5],\n \"-snet\": IPS[METVM5],\n \"-talltowers\": IPS[METVM5],\n}\n\n\ndef main(argv):\n \"\"\"Go Main Go\"\"\"\n if len(argv) == 1:\n print('Usage: python set_iemdb_etc_hosts.py ')\n return\n data = open('/etc/hosts').read()\n result = []\n for line in data.split(\"\\n\"):\n result.append(line)\n if line.startswith('# ---AUTOGEN---'):\n print(\"Found ---AUTOGEN---\")\n break\n for dbname in LOOKUP:\n ip = LOOKUP[dbname] if argv[1] == 'proxy' else '127.0.0.1'\n print(\"%s -> %s\" % (dbname, ip))\n result.append(\"%s iemdb%s.local\" % (ip, dbname))\n (tmpfd, tmpfn) = tempfile.mkstemp()\n os.write(tmpfd, ('\\n'.join(result)).encode('ascii'))\n os.write(tmpfd, b'\\n')\n os.close(tmpfd)\n os.rename(tmpfn, '/etc/hosts')\n os.chmod('/etc/hosts', 0o644)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"scripts/util/set_iemdb_etc_hosts.py","file_name":"set_iemdb_etc_hosts.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"363821012","text":"#\r\n# [905] Length of Longest Fibonacci Subsequence\r\n#\r\n# https://leetcode.com/problems/length-of-longest-fibonacci-subsequence/description/\r\n#\r\n# algorithms\r\n# Medium (41.14%)\r\n# Total Accepted: 4.4K\r\n# Total Submissions: 10.7K\r\n# Testcase Example: '[1,2,3,4,5,6,7,8]'\r\n#\r\n# A sequence X_1, X_2, ..., X_n is fibonacci-like if:\r\n#\r\n#\r\n# n >= 3\r\n# X_i + X_{i+1} = X_{i+2} for all i + 2 <= n\r\n#\r\n#\r\n# Given a strictly increasing array A of positive integers 
forming a sequence,\r\n# find the length of the longest fibonacci-like subsequence of A.  If one does\r\n# not exist, return 0.\r\n#\r\n# (Recall that a subsequence is derived from another sequence A by deleting any\r\n# number of elements (including none) from A, without changing the order of the\r\n# remaining elements.  For example, [3, 5, 8] is a subsequence of [3, 4, 5, 6,\r\n# 7, 8].)\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n# Example 1:\r\n#\r\n#\r\n# Input: [1,2,3,4,5,6,7,8]\r\n# Output: 5\r\n# Explanation:\r\n# The longest subsequence that is fibonacci-like: [1,2,3,5,8].\r\n#\r\n#\r\n# Example 2:\r\n#\r\n#\r\n# Input: [1,3,7,11,12,14,18]\r\n# Output: 3\r\n# Explanation:\r\n# The longest subsequence that is fibonacci-like:\r\n# [1,11,12], [3,11,14] or [7,11,18].\r\n#\r\n#\r\n#\r\n#\r\n# Note:\r\n#\r\n#\r\n# 3 <= A.length <= 1000\r\n# 1 <= A[0] < A[1] < ... < A[A.length - 1] <= 10^9\r\n# (The time limit has been reduced by 50% for submissions in Java, C, and C++.)\r\n#\r\n#\r\nclass Solution:\r\n def lenLongestFibSubseq(self, A):\r\n \"\"\"\r\n :type A: List[int]\r\n :rtype: int\r\n \"\"\"\r\n if not A:\r\n return 0\r\n d = set()\r\n visit = {}\r\n for a in A:\r\n d.add(a)\r\n visit[a] = set()\r\n res = 0\r\n maxV = max(A)\r\n for i in range(len(A)):\r\n for j in range(i + 1, len(A)):\r\n cur = 2\r\n a = A[i]\r\n b = A[j]\r\n # if a+b>maxV, no need to check following\r\n # cut branch\r\n if a + b > maxV:\r\n break\r\n if b not in visit[a]:\r\n visit[a].add(b)\r\n while a + b in d:\r\n t = b\r\n b = a + b\r\n a = t\r\n if b not in visit[a]:\r\n visit[a].add(b)\r\n else:\r\n # must be shorter than before, cut branch\r\n break\r\n cur += 1\r\n res = max(res, cur)\r\n return res if res > 2 else 0\r\n\r\n\r\ndef main():\r\n print(Solution().lenLongestFibSubseq([1, 3, 7, 11, 12, 14, 18]))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"Medium/873.length-of-longest-fibonacci-subsequence.python3.py","file_name":"873.length-of-longest-fibonacci-subsequence.python3.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"210882812","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: Aghiles Salah\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom ..utils.util_functions import which_\nfrom .evaluation_strategy import EvaluationStrategy\nimport sys\n\n\nclass Split(EvaluationStrategy):\n \"\"\"Evaluation Strategy Split. 
\n\n    Parameters\n    ----------\n    data: scipy sparse matrix, required\n        The user-item preference matrix.\n\n    prop_test: float, optional, default: 0.2\n        The proportion of the test set, \\\n        if > 1 then it is treated as the size of the test set.\n\n    prop_validation: float, optional, default: 0.0\n        The proportion of the validation set, \\\n        if > 1 then it is treated as the size of the validation set.\n\n    good_rating: float, optional, default: 1\n        The minimum value that is considered to be a good rating, \\\n        e.g., if the ratings are in {1, ..., 5}, then good_rating = 4.\n\n    data_train: ..., optional, default: None\n        The training data.\n\n    data_validation: ..., optional, default: None\n        The validation data.\n\n    data_test: ..., optional, default: None\n        The test data.\n\n    index_train: 1d array, optional, default: None\n        The indexes of training data (starting from 0).\n\n    index_validation: 1d array, optional, default: None\n        The indexes of validation data (starting from 0).\n\n    index_test: 1d array, optional, default: None\n        The indexes of test data (starting from 0).\n\n    data_train_bin: ..., default: None\n        The binary training data.\n\n    data_validation_bin: ..., default: None\n        The binary validation data.\n\n    data_test_bin: ..., default: None\n        The binary test data.\n    \"\"\"\n    \n    def __init__(self, data,prop_test=0.2,prop_validation=0.0,good_rating = 1., data_train=None, data_validation=None, data_test=None,index_train = None,index_validation = None,index_test = None):\n        EvaluationStrategy.__init__(self, data,good_rating = good_rating, data_train=data_train, data_validation=data_validation, data_test=data_test)\n        self.prop_test = prop_test\n        self.prop_validation = prop_validation\n        #maybe move these attributes to the parent class \n        self.index_train = index_train\n        self.index_validation = index_validation\n        self.index_test = index_test\n        #Additional attributes, \n        self.split_ran = False #check whether the data is already split or not \n        self.rank_met = False #Check whether there is no ranking metric, to save some computation\n        \n        \n    def train_test_split_(self):\n\n        print(\"Splitting the data\")\n        n = self.data_nnz\n        if self.prop_test > 1:\n            print(\"\\'prop_test\\'>1 and is treated as the size of the test data\")\n            if self.prop_test > n:\n                sys.exit(\"\\'prop_test\\' is greater than the number of users\")\n            else:\n                size_train = n - int(self.prop_test)\n        else:\n            size_train = int(np.round((1-self.prop_test)*n))\n        \n        index_train = np.random.choice(n, size=size_train,replace=False,p=None) #sample without replacement\n        index_test = np.where(np.invert(np.in1d(np.array(range(n)), index_train)))[0] #index_test are the indices which are not in index_train\n        \n        return index_train, index_test\n    \n    \n    \n\n    def run_(self):\n        \n        #Building train and test sets\n        \n        if self._data_train is None or self._data_test is None:\n            \n            if self.index_train is None or self.index_test is None:\n                self.index_train, self.index_test = self.train_test_split_() \n            \n            #preparing training set, creating the training sparse matrix \n            print(\"Preparing training data\")\n            train_data = self.data[self.index_train,:]\n            id_train_users = np.array(train_data[:,0],dtype='int64').flatten() \n            id_train_items = np.array(train_data[:,1],dtype='int64').flatten() \n            ratings_train = np.array(train_data[:,2],dtype='float64').flatten()\n            self._data_train = sp.csc_matrix((ratings_train, (id_train_users,id_train_items)),shape=(self.data_nrows, self.data_ncols))\n            del(id_train_users,id_train_items,ratings_train)\n            self._data_train.eliminate_zeros()\n            
self._data_train = sp.csc_matrix(self._data_train)\n            \n            \n            #preparing test set\n            print(\"Preparing test data\")\n            test_data = self.data[self.index_test,:]\n            id_test_users = np.array(test_data[:,0],dtype='int64').flatten()\n            id_test_items = np.array(test_data[:,1],dtype='int64').flatten()\n            ratings_test = np.array(test_data[:,2],dtype='float64').flatten()\n            self._data_test = sp.csc_matrix((ratings_test, (id_test_users,id_test_items)),shape=(self.data_nrows, self.data_ncols)) \n            self._data_test.eliminate_zeros()\n            self._data_test = sp.csc_matrix(self.data_test) \n        \n        \n        #Binary train data, useful to get some stats, such as the number of ratings per user\n        self._data_train_bin = self._data_train.copy() # always use copy() when assigning sparse matrices (a plain assignment only copies the reference) \n        self._data_train_bin.data = np.full(len(self._data_train_bin.data),1)\n        #update this binarization process\n        \n        \n        #Binary test data, useful for ranking and top@M evaluation\n        self._data_test_bin = self._data_test.copy()\n        self._data_test_bin.data[which_(self.data_test_bin.data,'<',self.good_rating)] = 0. \n        self._data_test_bin.eliminate_zeros() \n        self._data_test_bin.data = np.full(len(self.data_test_bin.data),1)\n        self.split_ran = True\n\n\n    #This function is callable from the experiment class so as to run an experiment \n    def run_exp(self, model, metrics):\n        #check whether we have at least one ranking metric\n        for mt in metrics:\n            if mt.type == 'ranking':\n                self.rank_met = True\n                break\n\n        \n        if not self.split_ran:\n            self.run_()\n        \n        \n        model.fit(self.data_train)\n        print(\"Starting evaluation\")\n        res = sp.csc_matrix((self.data_test.shape[0],len(metrics)+1)) #this matrix will contain the evaluation results for each user\n        \n        #evaluation is done user by user to avoid memory errors on large datasets. \n        #loops are inefficient in python, this part should be re-implemented in cython or c/c++\n        nb_processed_users = 0\n        for u in range(self.data_test.shape[0]):\n            if not np.sum(self.data_test_bin[u,:]): #users with 0 heldout items should not be considered in the evaluation\n                nb_processed_users +=1\n            else:\n                pred_u = model.predict(index_user=u)\n                pred_u[which_(self.data_train[u,:].todense().A1,\">\",0)] = 0. 
#remove known ratings  #.A1 allows to flatten a dense matrix\n                if self.rank_met:\n                    rec_list_u = (-pred_u).argsort() #ordering the items (in decreasing order) according to the predictions\n                \n                #computing the different metrics\n                idx = 0\n                for mt in metrics:\n                    if mt.type == 'ranking':\n                        res[u,idx] = mt.compute(data_test = self.data_test_bin[u,:].todense().A1, reclist=rec_list_u)\n                    else:\n                        res[u,idx] = mt.compute(data_test = self.data_test[u,:].todense().A1, prediction=pred_u)\n                    idx = idx + 1\n                res[u,len(metrics)] = 1 # This column indicates whether a user has been preprocessed\n                nb_processed_users +=1\n            if nb_processed_users % 1000 == 0:\n                print(nb_processed_users,\"processed users\")\n        #computing the average results \n        res_avg = res[which_(res[:,len(metrics)].todense().A1,\">\",0),:].mean(0).A1 # of type array\n        res_tot = {\"ResAvg\":res_avg[0:len(metrics)],\"ResPerUser\": res}\n        return res_tot  ","sub_path":"cornac/evaluation_strategies/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":8079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"444149461","text":"import torch\nimport pandas\nfrom transform import DataLabelCompose, ToTensor, AdditiveUniform,\\\n    AdditiveUniformTriary, Compose, ToType\nfrom data_util.metagx_util import load_metagx_dataset\nfrom data_util import util, metagx_util\n\n\nclass GeneDataset(torch.utils.data.Dataset):\n    def __init__(self, features, labels, transform=None, binary=0):\n        self.features = features\n        self.labels = labels\n        assert len(features) == len(labels)\n        self.transform = transform\n        self.binary = binary\n\n    def __getitem__(self, idx):\n        return self.transform(data=self.features.iloc[idx],\n                              target=self.labels.iloc[idx])\n\n    def __len__(self):\n        return len(self.features)\n\n\ndef get_merged_common_dataset(opt, skip_study=None, dataset_dict_cache=[], data_cache=[]):\n    cancer_data_dir = opt.curated_breast_data_dir\n    if dataset_dict_cache:\n        dataset_dict = dataset_dict_cache[0]\n    else:\n        dataset_dict = util.load_curated(cancer_data_dir)\n        dataset_dict_cache.append(dataset_dict)\n    mergedCurated = dataset_dict['merged'].copy()\n\n    if data_cache:\n        data = data_cache[0]\n    else:\n        data = metagx_util.load_metagx_dataset(opt.metagx_data_dir, min_genes=opt.min_genes)\n        data_cache.append(data)\n    merged = data['merged'].copy()\n    genes_list = data['genes_features'].copy()\n\n    metagx_pos_outcome = merged[merged.posOutcome.isin([-1, 1])]\n    print('num pos outcome studies {0}'.format(len(metagx_pos_outcome.study.unique())))\n    if skip_study is not None:\n        study_to_skip = metagx_pos_outcome.study.unique()[skip_study]\n    else:\n        study_to_skip = None\n\n    merged_common = util.merge_metagx_curated(merged, mergedCurated)\n\n    merged_treatments = list(metagx_util.treatment_columns_metagx) + util.treatment_columns_bmc\n    merged_treatments = [x for x in merged_treatments if x in merged_common]\n    merged_treatments = list(set(merged_treatments))\n    # add continuous covariates to genes\n    cont_columns = [x for x in merged_treatments if len(merged_common[x].unique()) > 20]\n    merged_treatments = [x for x in merged_treatments if x not in cont_columns]\n    common_genes_list = [x for x in genes_list if x in merged_common]\n    if opt.use_covars:\n        non_genes = cont_columns + merged_treatments + ['posOutcome']\n    else:\n        non_genes = []\n    if study_to_skip is None:\n        train_data, train_labels, val_data, val_labels = util.random_split(merged_common,\n                                                                           common_genes_list + non_genes,\n                                                                           ['study', 'posOutcome'],\n                                                                           
balance_validation=False,\n balance_by_study=False,\n ratio=opt.test_ratio,\n to_numpy=False)\n else:\n train_data, train_labels, val_data, val_labels = next(util.split_by_study(merged_common,\n common_genes_list + non_genes,\n ['study', 'posOutcome'],\n study=study_to_skip,\n to_numpy=False))\n # it's ok to use gene expression in unsupervised model\n copy = val_data.copy()\n copy.loc[:, non_genes] = 0\n val_copy = val_labels.copy()\n val_copy.loc[:, 'posOutcome'] = 0\n train_data = pandas.concat([train_data, copy], ignore_index=True)\n train_labels = pandas.concat([train_labels, val_copy], ignore_index=True)\n print('validation study {0}'.format(study_to_skip))\n print(val_data.shape)\n\n train_data.fillna(0, inplace=True)\n val_data.fillna(0, inplace=True)\n to_tensor = ToTensor()\n to_float = ToType('float')\n add_age = AdditiveUniform(-0.5, 0.5, 'age')\n add_tumor_size = AdditiveUniform(-0.5, 0.5, 'tumor_size')\n add_posOutcome = AdditiveUniformTriary(0.0, 0.05, 'posOutcome')\n add_treat = Compose([AdditiveUniformTriary(0.0, 0.05, x) for x in merged_treatments])\n lst = []\n if 'posOutcome' in train_data.columns:\n lst = [add_age, add_tumor_size, add_posOutcome, add_treat]\n compose = Compose(lst + [to_tensor, to_float])\n compose_label = Compose([add_posOutcome, to_tensor, to_float])\n num_binary = len(merged_treatments + ['posOutcome'])\n num_binary = 0\n transform = DataLabelCompose(compose, compose_label)\n\n train_set = GeneDataset(train_data, train_labels, transform, binary=num_binary)\n test_set = GeneDataset(val_data, val_labels, transform, binary=num_binary)\n return train_set, test_set\n\n\ndef get_metagx_dataset(ratio=0.1):\n data = load_metagx_dataset('/home/noskill/projects/cancer/data/metaGxBreast/', min_genes=5000)\n merged = data['merged']\n genes_list = data['genes_features']\n\n train_data, train_labels, val_data, val_labels = util.random_split(merged,\n genes_list,\n ['study'],\n balance_validation=False,\n balance_by_study=False,\n ratio=ratio)\n to_tensor = ToTensor()\n transform = DataLabelCompose(to_tensor, to_tensor)\n\n # assert val_labels.mean() == 0.5\n train_set = GeneDataset(train_data, train_labels, transform)\n test_set = GeneDataset(val_data, val_labels, transform)\n return train_set, test_set\n\n\n","sub_path":"ml/neural/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"615281915","text":"### AmoCRM.py - класс для работы с AmoCRM\nimport datetime as dt\nimport json\nimport os\nimport pandas as pd\nimport dotenv\nimport config #на время разработки. 
Там загружается токен API AmoCRM\nimport requests\nimport operator\nfrom time import sleep\nimport shutil\n\nfrom loguru import logger\n\nclass AmoCRM:\n \"\"\"\n Взаимодействие с AmoCRM API\n Загрузка, обработка и выгрузка информации о сделках\n \"\"\"\n CONFIG = {\n 'AMO_DEAL_FIELDS_SCHEMA': 'amo_deal_fields_schema.json',#файл со схемой полей сделок Amo\n 'DEALS_RAW_JSON_PATH': 'amo_leads_raw_json/',\n 'DEALS_TEMP_JSON_PATH': 'amo_leads_temp_json/',\n 'DEALS_WEEK_JSON_PATH': 'amo_leads_week_json/',\n 'JSON_DEALS_RAW_FILENAME': 'json_deals_raw',\n 'DEALS_WEEK_JSON_FILENAME': 'json_deals_week',\n 'ALL_LEADS_EXT_JSON_FILENAME': 'amocrm_all_leads_ext_json',\n 'PAGE_SIZE': 250,#Количество сделок, запрашиваемых из Amo за один запрос\n 'PAGES_COUNT_PER_LOAD': 50, #Количество страниц размером AMO_PAGE_SIZE, которое будем подгружать за один запуск скрипта (для случаем, когда нам нужно выгрузить из AMO много сделок)\n 'PAUSE_BETWIN_REQUESTS': 2, #Пауза, между запросами пачек сделок по API AmoCRM\n 'SUBDOMAIN': 'syn',\n 'AMO_ACCESS_TOKEN': '',\n 'AMO_REDIRECT_URI': 'https://hook.integromat.com/78sigwp948jnsjf2ndodfctwc3yuechm',\n 'AMO_API_REQUESTS_HEADERS': {'User-Agent': 'amoCRM-oAuth-client/1.0',\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + 'AMO_ACCESS_TOKEN'},\n\n 'AMO_API_REQUESTS_ERRORS': {\n 400: 'Bad request',\n 401: 'Unauthorized',\n 403: 'Forbidden',\n 404: 'Not found',\n 500: 'Internal server error',\n 502: 'Bad gateway',\n 503: 'Service unavailable'},\n \n 'TIME_FORMAT': '%Y-%m-%d %H:%M:%S',\n 'WEEK_OFFSET': dt.timedelta(hours=24 - 24 + 6 - 6), #Сдвиг начала недели (для отчётов с нестандартными неделями)\n 'AMO_DEALS_BASE_FIELDS': {'id', \n 'created_at', \n 'updated_at', \n 'pipeline_id', \n 'status_id',\n 'price',\n 'responsible_user_id',\n 'group_id',\n 'loss_reason_id',\n 'created_by',\n 'updated_by',\n 'closed_at',\n 'is_deleted'},\n\n #Пользовательские поля в Амо с кодами полей\n 'AMO_DEALS_CUSTOM_FIELDS': {'city': 512318, #город клиента, проставляемый менеджерами в AmoCRM\n 'tilda_city':648980, #город, на который был ориентирован лендинг на Тильде\n 'ct_city': 648274, #город, определившийся CallTouch\n 'drupal_utm': 632884, #Строка с utm-метками, пробрасываемая Друпалом в Амо\n 'tilda_utm_source': 648158, #utm_source, пробрасываемая Тильдой в Амо\n 'tilda_utm_medium': 648160,\n 'tilda_utm_campaign': 648310,\n 'tilda_utm_content': 648312,\n 'tilda_utm_term': 648314,\n 'ct_utm_source': 648256, #utm_source, проставляемый CallTouch\n 'ct_utm_medium': 648258,\n 'ct_utm_campaign': 648260,\n 'ct_utm_content': 648262,\n 'ct_utm_term': 648264,\n 'channel': 600935,\n 'items_2019': 562024, #Старое значение поля Услуга в AmoCRM\n 'items_2020': 648028, #Новое значение поля Услуга в AmoCRM\n 'tilda_product': 648152, #Метка продукта в Тильде\n 'drupal_piwik_id': 589816,#piwik_id пробрасываемый Друпалом\n 'tilda_piwik_id': 648530,\n 'drupal_google_id': 589818,\n 'ct_google_id': 648292,\n 'ct_yandex_id':648294,\n 'ct_calltouch_session_id': 648288,\n 'tilda_calltouch_session_id': 648532,\n 'ct_calltouch_client_id': 648290,\n 'tilda_cookies': 648166,\n 'drupal_page': 587868, \n 'tilda_page':648556,\n 'ct_page': 648268,\n 'tilda_form_id': 648164,\n 'tilda_form_name': 648162,\n 'tilda_referer': 648168,\n 'ct_referer': 648266,\n 'ct_create_from': 648218,\n 'ct_type_communication': 648220,\n 'ct_client_phone_number': 648224,\n 'ct_communication_date': 648232,\n 'ct_communication_time': 648234,\n 'ct_call_long': 648236,\n 'ct_call_waiting': 648238,\n 'ct_device': 648276,\n 'ct_os': 
648278,\n                                 'ct_browser': 648280,\n                                 'ct_call_id': 648282},\n\n            #Идентификаторы воронок AmoCRM\n            'AMO_PIPELINES_ID': {\"CNTX\": 7038,\n                                \"WEB\": 28752},\n\n            #pipeline_id: trash_status_id\n            'AMO_TRASH_STATUSES_ID': {7038: 28985871, #CNTX\n                                      28752: 29160522}, #WEB\n\n            #Поля, вычисляемые из полей сделок в AmoCRM\n            'DEALS_SPECIAL_FIELDS': {'created_at_timestamp',\n                            'updated_at_timestamp',\n                            'trashed_at',\n                            'lead_utm_source',\n                            'lead_utm_medium',\n                            'lead_utm_campaign',\n                            'lead_utm_content',\n                            'lead_utm_term',\n                            'lead_device',\n                            'lead_browser',\n                            'lead_os'}\n            } \n\n    def __init__(self, config=None):\n        self.CONFIG = {}\n        if config:\n            self.CONFIG.update(config)\n        else:\n            self.CONFIG.update(AmoCRM.CONFIG)\n        self.CONFIG['AMO_ACCESS_TOKEN'] = os.getenv('AMO_ACCESS_TOKEN')\n        self.CONFIG['AMO_API_REQUESTS_HEADERS']['Authorization'] = 'Bearer ' + os.getenv('AMO_ACCESS_TOKEN')\n\n    #========================\n    #Авторизация в AMO CRM\n    #---------------------\n    #обновление токена\n    def _refresh_access_token(self):\n        url = 'https://hook.integromat.com/957pemos5degb894sr6kocv4dxtmbuql'\n        params = {'pass': os.getenv('FDPASS')}\n        headers = {\n            \"User-Agent\": \"Synapse_user_agent\",\n            \"Content-Type\": \"application/json\"} \n        rs = requests.post(url, params=params, headers=headers)\n        #print(rs.text)\n        os.environ['AMO_ACCESS_TOKEN'] = json.loads(rs.text)['access_token']\n        self.CONFIG['AMO_ACCESS_TOKEN'] = os.environ['AMO_ACCESS_TOKEN']\n        self.CONFIG['AMO_API_REQUESTS_HEADERS']['Authorization'] = 'Bearer ' + os.getenv('AMO_ACCESS_TOKEN')\n        dotenv_file = dotenv.find_dotenv()\n        dotenv.set_key(dotenv_file, 'AMO_ACCESS_TOKEN', json.loads(rs.text)['access_token'])\n        dotenv.set_key(dotenv_file, 'AMO_REFRESH_TOKEN', json.loads(rs.text)['refresh_token'])\n\n    #--------------------\n    #Конец блока авторизации в Amo CRM\n    #=======================\n    \n    #Парсинг json_deals в набор строк\n    #каждая строка - информация об одной сделке\n    def _extract_amo_json_deals_to_rows(self, json_deals, pipelines_list):\n        rows = []\n        #Проходим по сделкам и наполняем amo_deals rows\n        for deal in json_deals:\n            if deal['pipeline_id'] in pipelines_list:\n                rows.append(self._get_row_from_amo_json_deal(deal))\n            # rows += self._get_row_from_amo_json_deal(deal)\n        return rows\n    \n    #Наполняю таблицу сделками из JSON-файла AMO JSON DEALS\n    def extract_amo_json_file_to_rows(self, json_path, json_filename):\n        #Читаем json-файл\n        logger.debug(f'Читаем файл {json_path + json_filename}')\n        with open(json_path + json_filename, 'r', encoding=\"utf8\") as json_file:\n            json_deals = json.load(json_file)\n        rows = self._extract_amo_json_deals_to_rows(json_deals)\n        return rows\n\n    #Создаю таблицу сделок из всех JSON-файлов в директории\n    def extract_amo_json_from_directory_to_rows(self, json_path):\n        files_list = os.listdir(json_path)\n        logger.debug(f'Список файлов для загрузки: {files_list}')\n        rows = []\n        for filename in files_list:\n            rows = rows + self.extract_amo_json_file_to_rows(json_path, filename)\n        return rows\n\n    #Получаю словарь field_id: values из json_deal\n    def _get_custom_values_dict_from_custom_field_values_json(self, json_cfv):\n        cfv_dict = {}\n        for field in json_cfv:\n            if field['field_id'] == 648028:\n                #Если это поле со списком товаров (648028), то сериализуем его\n                field_value = json.dumps(field['values'], ensure_ascii=False)\n\n            elif field['field_id'] == 593152:\n                #Если это поле с причиной закрытия сделки, то указываем id-причины\n                field_value = field['values'][0]['enum_id']\n\n            else:\n                field_value = field['values'][0]['value']\n            
cfv_dict[field['field_id']] = field_value\n \n return cfv_dict\n\n #Получаю словарь utm-меток из словаря custom\n #здесь хардкод id-полей, содержащих utm-метки\n #если поля меняются, нужно будет переписать эту функцию\n def _get_lead_utm_from_custom_values_dict(self, cf_dict):\n utm_dict = {}\n if 632884 in cf_dict:\n try:\n drupal_utm_dict = self._get_drupal_utm_dict(cf_dict[632884])\n utm_dict['lead_utm_source'] = drupal_utm_dict['source']\n utm_dict['lead_utm_medium'] = drupal_utm_dict['medium']\n utm_dict['lead_utm_campaign'] = drupal_utm_dict['campaign']\n utm_dict['lead_utm_content'] = drupal_utm_dict['content']\n utm_dict['lead_utm_term'] = drupal_utm_dict['keyword']\n except:\n logger.error(f'Ошибка в utm_метке: {cf_dict[632884]}')\n if not 'lead_utm_source' in utm_dict:\n if 648256 in cf_dict:\n utm_dict['lead_utm_source'] = cf_dict[648256] #ct_utm_source\n else:\n utm_dict['lead_utm_source'] = cf_dict.get(648158, '') #tilda_utm_source\n\n if not 'lead_utm_medium' in utm_dict:\n if 648258 in cf_dict:\n utm_dict['lead_utm_medium'] = cf_dict[648258] #ct_utm_medium\n else:\n utm_dict['lead_utm_medium'] = cf_dict.get(648160, '') #tilda_utm_medium\n\n if not 'lead_utm_campaign' in utm_dict:\n if 648260 in cf_dict:\n utm_dict['lead_utm_campaign'] = cf_dict[648260] #ct_utm_campaign\n else:\n utm_dict['lead_utm_campaign'] = cf_dict.get(648310, '') #tilda_utm_campaign\n\n if not 'lead_utm_content' in utm_dict:\n if 648262 in cf_dict:\n utm_dict['lead_utm_content'] = cf_dict[648262] #ct_utm_content\n else:\n utm_dict['lead_utm_content'] = cf_dict.get(648312, '') #tilda_utm_content\n\n if not 'lead_utm_term' in utm_dict:\n if 648264 in cf_dict:\n utm_dict['lead_utm_term'] = cf_dict[648264] #ct_utm_term\n else:\n utm_dict['lead_utm_term'] = cf_dict.get(648314, '') #tilda_utm_term\n\n return utm_dict\n\n #Парсим поле drupal_utm в словарь drupal_utm_dict\n def _get_drupal_utm_dict(self, drupal_utm):\n drupal_utm_list = drupal_utm.split(', ')\n drupal_utm_dict = dict(\n [item.split('=') for item in drupal_utm_list if '=' in item]\n )\n #Проверяем не поменяны ли местами метки (в старой статистике такое было)\n try:\n if drupal_utm_dict['medium'] in ['yandex', 'google']:\n #меняем местами source <=> medium\n s = drupal_utm_dict['source']\n drupal_utm_dict['source'] = drupal_utm_dict['medium']\n drupal_utm_dict['medium'] = s\n except:\n logger.error(f'Ошибка в поле utm_метка: {drupal_utm}')\n return drupal_utm_dict\n\n #Собираю строку таблицы из данных сделки amo_json\n #hardcode!!!\n def _get_row_from_amo_json_deal(self, json_deal):\n row = {}\n #Читаем json-файл общей схемы сделок\n with open(self.CONFIG['AMO_DEAL_FIELDS_SCHEMA'], 'r', encoding=\"utf8\") as json_file:\n amo_deal_fields = json.load(json_file)\n #Добавляем базовые поля\n for field in amo_deal_fields['base_amo_deal_fields']['fields']:\n if field['type'] == \"STRING\":\n row.update({field['name']: str(json_deal[field['amo_name']])})\n elif field['type'] == \"INTEGER\":\n if json_deal[field['amo_name']] == None:\n row.update({field['name']: None})\n else:\n row.update({field['name']: int(json_deal[field['amo_name']])})\n elif field['type'] == \"TIMESTAMP\":\n if json_deal[field['amo_name']] == None:\n row.update({field['name']: None})\n else:\n row.update({field['name']: pd.to_datetime(json_deal[field['amo_name']], unit='s')})\n elif field['type'] == \"BOOLEAN\":\n if json_deal[field['amo_name']] == None:\n row.update({field['name']: None})\n else:\n row.update({field['name']: bool(json_deal[field['amo_name']])})\n else:\n 
row.update({field['name']: json_deal[field['amo_name']]})\n\n #Если есть пользовательские поля, добавляем их\n custom_fields_values = json_deal.get('custom_fields_values')\n if custom_fields_values:\n #составляем словарь field_id: value\n custom_fields_dict = self._get_custom_values_dict_from_custom_field_values_json(custom_fields_values)\n for field in amo_deal_fields['custom_amo_deal_fields']['fields']:\n field_id = field['field_id']\n if field_id in custom_fields_dict:\n if field['type'] == \"STRING\":\n row[field['name']] = str(custom_fields_dict[field_id])\n elif field['type'] == \"INTEGER\":\n if custom_fields_dict[field_id] == None:\n row[field['name']] = None\n else:\n row[field['name']] = int(custom_fields_dict[field_id])\n elif field['type'] == \"TIMESTAMP\":\n if custom_fields_dict[field_id] == None:\n row[field['name']] = None\n else:\n row[field['name']] = pd.to_datetime(custom_fields_dict[field_id], unit='s')\n else:\n row[field['name']] = custom_fields_dict[field_id]\n #Вытаскиваем значения UTM-меток в единые поля lead_utm_...\n lead_utms = self._get_lead_utm_from_custom_values_dict(custom_fields_dict)\n row.update(lead_utms)\n\n #Добавляем поля из раздела _embedded (списки)\n for field in amo_deal_fields['_embedded_amo_deal_fields']['fields']:\n #сериализуем значение поля\n field_value = json.dumps(json_deal['_embedded'][field['amo_name']], ensure_ascii=False)\n row.update({field['name']: field_value})\n\n #Добавляем дополнительные поля\n row['created_at_timestamp'] = pd.to_datetime(row['created_at'], unit='s')\n row['updated_at_timestamp'] = pd.to_datetime(row['updated_at'], unit='s')\n if json_deal.get('trashed_at', None):\n row['trashed_at_timestamp'] = pd.to_datetime(json_deal.get('trashed_at', None), unit='s')\n return row\n\n #Получаю датафрейм сделок из JSON\n def _extract_amo_json_deals_to_dataframe(self, json_deals, pipelines_list):\n rows = self._extract_amo_json_deals_to_rows(json_deals, pipelines_list)\n df = pd.DataFrame(rows)\n return df\n\n #=====================\n # Работа над получением и обновлением сделок из AmoCRM\n #=====================\n\n #API Скачиваем все сделки из AmoCRM и раскладываем их в набор файлов JSON\n def _get_all_deals_ext_to_json(self, raw_json_path, json_raw_deals_filename):\n #Выкачиваем все сделки со списками из AMO отсортированные по дате создания по возрастанию и сохраняем их в JSON-файл\n #Разбиваем запрос на пачки, чтобы Amo нас не блокировало\n #Эту функцию используем тогда, когда у нас на сервере нет никакого списка сделок\n # json_raw_deals_filename - начало имени, под которым будут сохраняться файлы при скачивании из AmoCRM\n # json_week_deals_filename - начало имени, под которым будут храниться не��ельные пачки сделок\n page = 1\n #Если папки с raw_json нет - создаём\n if not os.path.isdir(raw_json_path):\n os.mkdir(raw_json_path)\n deals_pack = []\n has_more = True\n while has_more:\n #запрашиваем очередную страницу сделок из Амо\n logger.debug(f'Запрашиваю страницу {page} из AmoCRM')\n rs = self._get_deals_ext_created_date_inc(self.CONFIG['PAGE_SIZE'], page)\n #Если запрос сработал без ошибок\n if rs.status_code == 200:\n #добавляем полученные сделки в JSON коллекцию\n deals_pack += (json.loads(rs.text))['_embedded']['leads']\n #Если количество полученных записей достигла предела - сохраняем их в файл и обнуляем массив\n if page % self.CONFIG['PAGES_COUNT_PER_LOAD'] == 0:\n with open(raw_json_path + json_raw_deals_filename + str(page) + '.json', 'w', encoding=\"utf8\") as output_file:\n json.dump(deals_pack, output_file, 
ensure_ascii=False)\n deals_pack = []\n logger.info(f'{str(page)} выгружено в файл')\n elif rs.status_code == 204:\n logger.info('загрузка успешно завершена')\n has_more = False\n break\n else:\n logger.error(f'Ошибка {rs.status_code}')\n has_more = False\n break\n page += 1\n sleep(self.CONFIG['PAUSE_BETWIN_REQUESTS'])\n with open(raw_json_path + json_raw_deals_filename + str(page) + '.json', 'w', encoding=\"utf8\") as output_file:\n json.dump(deals_pack, output_file, ensure_ascii=False)\n return 0\n\n \n #API Получаем список сделок, отсортированный по возрастанию даты создания \n def _get_deals_ext_created_date_inc(self, limit, page):\n #API AmoCRM v4\n #Page - номер страницы (размер страницы = limit)\n #Код ответа 204 означает, что контента больше нет\n #Функция возвращает ответ на запрос к API AmoCRM\n url = 'https://' + self.CONFIG['SUBDOMAIN'] + '.amocrm.ru/api/v4/leads'\n params = {\n \"limit\": limit,\n \"page\": page,\n \"with\": \"catalog_elements,contacts,loss_reason\",\n \"order[created_at]\": \"inc\"\n }\n\n try:\n try_num = 1\n rs = requests.get(url, headers=self.CONFIG['AMO_API_REQUESTS_HEADERS'], params=params)\n while rs.status_code == 401:\n logger.error(f'Не удалось авторизоваться в API AmoCRM. Попытка {try_num}')\n self._refresh_access_token()\n sleep(2 ** try_num)\n rs = requests.get(url, headers=self.CONFIG['AMO_API_REQUESTS_HEADERS'], params=params)\n try_num += 1\n return rs\n except ConnectionError:\n logger.error('Ошибка ConnectionError ' + url)\n \n #API Получаем список сделок, отсортированный по убыванию даты последней модификации\n def _get_deals_ext_sorted_by_updated_date_desc(self, limit, page):\n #API AmoCRM v4\n #Получаем список сделок с элементами списков. limit = 0 .. 500, но лучше 50\n #Отсортирован по убыванию даты последней модификации\n #Page - номер страницы (размер страницы = limit)\n #Код ответа 204 означает, что контента больше нет\n #Функция возвращает ответ на запрос к API AmoCRM\n url = 'https://' + self.CONFIG['SUBDOMAIN'] + '.amocrm.ru/api/v4/leads'\n params = {\n \"limit\": limit,\n \"page\": page,\n \"with\": \"catalog_elements,contacts,loss_reason\",\n \"order[updated_at]\": \"desc\"\n }\n\n try:\n try_num = 1\n rs = requests.get(url, headers=self.CONFIG['AMO_API_REQUESTS_HEADERS'], params=params)\n while rs.status_code == 401:\n logger.error(f'Не удалось авторизоваться в API AmoCRM. Попытка {try_num}')\n self._refresh_access_token()\n sleep(2 ** try_num)\n rs = requests.get(url, headers=self.CONFIG['AMO_API_REQUESTS_HEADERS'], params=params)\n try_num += 1\n return rs\n except ConnectionError:\n logger.error('Ошибка ConnectionError ' + url)\n\n #Скачиваем все сделки, обновлённые после даты, во временную папку\n def _get_updated_deals_since_timestamp_to_json_temp_folder(self, last_update):\n page = 1\n limit = self.CONFIG['PAGE_SIZE']\n temp_path = self.CONFIG['DEALS_TEMP_JSON_PATH']\n #удаляем файлы из временной папки, если она существует\n if os.path.isdir(self.CONFIG['DEALS_TEMP_JSON_PATH']):\n for filename in os.listdir(temp_path):\n file_path = os.path.join(temp_path, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n logger.error('Failed to delete %s. 
Reason: %s' % (file_path, e))\n        else:\n            #Если папки нет\n            #создаём пустую папку для временных файлов\n            os.mkdir(self.CONFIG['DEALS_TEMP_JSON_PATH'])\n\n        new_deals = []\n        has_more = True\n        while has_more:\n            rs = self._get_deals_ext_sorted_by_updated_date_desc(limit, page)\n            if rs.status_code == 200:\n                new_deals += (json.loads(rs.text))['_embedded']['leads']\n                #если последняя из загруженных сделок обновлена раньше заданной даты - завершаем скачивание\n                if new_deals[-1]['updated_at'] < last_update:\n                    has_more = False\n\n                #Если количество полученных записей достигла предела - сохраняем их в файл и обнуляем массив\n                if page % self.CONFIG['PAGES_COUNT_PER_LOAD'] == 0:\n                    with open(self.CONFIG['DEALS_TEMP_JSON_PATH'] + 'temp_updated_leads_' + str(page) + '.json', 'w', encoding=\"utf8\") as output_file:\n                        json.dump(new_deals, output_file, ensure_ascii=False, indent=2)\n                    new_deals = []\n                    logger.info(f'{str(page)} выгружено в файл')\n            elif rs.status_code == 204:\n                logger.info('загрузка завершена')\n                has_more = False\n                break\n            else:\n                logger.log('connection_errors', f'Ошибка {rs.status_code}')\n                has_more = False\n                break\n            page += 1\n            sleep(self.CONFIG['PAUSE_BETWIN_REQUESTS'])\n            logger.debug(f'Page: {page}')\n        with open(self.CONFIG['DEALS_TEMP_JSON_PATH'] + 'temp_updated_leads_' + str(page) + '.json', 'w', encoding=\"utf8\") as output_file:\n            json.dump(new_deals, output_file, ensure_ascii=False, indent=2)\n\n        return 0\n    \n    #Добавляем пакет сделок в нашу базу AMO JSON WEEK (Список сделок в JSON, разбитый на файлы по неделям создания сделки)\n    #Возвращаем json результата слияния\n    def _merge_json_pack_to_json_week_deals(self, json_pack, week_json_path, week_json_filename):\n        #Если базы JSON WEEK нет - создаём\n        if not os.path.exists(week_json_path):\n            logger.info('База AMO JSON WEEK не найдена')\n            os.mkdir(week_json_path)\n            logger.info('Создана новая база AMO JSON WEEK в каталоге ' + week_json_path)\n\n        count_added_deals = 0\n        count_updated_deals = 0\n        result_json = [] #сюда собираем результаты синхронизации сделок\n\n        #Пока в пачке есть сделки добавляем их в базу\n        while len(json_pack) > 0:\n            new_deal = json_pack[0]\n\n            year = dt.datetime.fromtimestamp(new_deal['created_at']).isocalendar()[0]\n            week = dt.datetime.fromtimestamp(new_deal['created_at']).isocalendar()[1]\n            # year = dt.datetime.fromtimestamp(json_pack[0]['created_at']).isocalendar()[0]\n\n            week = dt.datetime.fromtimestamp(json_pack[0]['created_at']).isocalendar()[1]\n\n            \n            #Если json с этой недели уже есть, дополняем его\n            if os.path.isfile(week_json_path + week_json_filename + '_' + str(year) +'_'+ str(week).zfill(2) + '.json'):\n                with open(week_json_path + week_json_filename + '_' + str(year) +'_'+ str(week).zfill(2) + '.json', 'r', encoding=\"utf8\") as week_json_file:\n                    logger.info(\"Дополняем файл \" + week_json_path + week_json_filename + '_' + str(year) +'_'+ str(week).zfill(2) + '.json',)\n                    week_deals = json.load(week_json_file)\n                    \n                #Пока у новых сделок сохраняется номер недели\n                while dt.datetime.fromtimestamp(json_pack[0]['created_at']).isocalendar()[0] == year and dt.datetime.fromtimestamp(json_pack[0]['created_at']).isocalendar()[1] == week:\n                    #Если сделка с таким id уже содержится в базе JSON WEEK - обновляем её\n                    if any(deal['id'] == json_pack[0]['id'] for deal in week_deals):\n                        #и обновляем инфу по этой сделке в week_deals\n                        week_deals = self._update_deal_in_json_deals(json_pack[0], week_deals)\n                        # logger.info(f'Сделка #{new_deal[\"id\"]} обновлена')\n                        count_updated_deals += 1\n                    #если id новой сделки уникален - добавляем 
сделку\n else:\n #добавляем инфу по сделке и синхронизируем week_deals с результатом добавления\n week_deals = self._add_deal_to_json_deals(json_pack[0], week_deals)\n # logger.info(f'Сделка #{json_pack[0][\"id\"]} добавлена в AMO JSON WEEK')\n count_added_deals += 1\n #добавляем результат merge в итоговый json\n result_json.append(week_deals[-1])\n #Удаляем добавленную сделку из пачки\n json_pack.pop(0)\n# print(len(result_json))\n #если сделок в пачке больше нет, сохраняем сделки в AMO JSON WEEK и завершаем функцию\n if len(json_pack) == 0:\n output_filename = week_json_filename + '_' + str(year) +'_'+ str(week).zfill(2) + '.json'\n with open(week_json_path + output_filename, 'w', encoding=\"utf8\") as output_file:\n json.dump(week_deals, output_file, ensure_ascii=False, indent=2)\n logger.info('Обновлён файл:' + output_filename)\n logger.info('Добавление пачки сделок завершено')\n logger.info(f'{count_added_deals} было добавлено')\n logger.info(f'{count_updated_deals} было обновлено')\n# result_json.append(week_deals)\n# result_json += week_deals\n# print(len(result_json))\n return result_json\n\n #Если неделя в новой сделке отличаеся от той, с которой работали, то сохраняем сделки в AMO JSON WEEK и начинаем работу с новой неделей\n output_filename = week_json_filename + '_' + str(year) +'_'+ str(week).zfill(2) + '.json'\n with open(week_json_path + output_filename, 'w', encoding=\"utf8\") as output_file:\n json.dump(week_deals, output_file, ensure_ascii=False, indent=2)\n logger.info('Обновлён файл:' + output_filename)\n# result_json.append(week_deals)\n# result_json += week_deals\n# print(len(result_json))\n continue\n\n #Если в AMO JSON WEEK нет файла соответствующей недели, создаём его и наполняем\n else:\n logger.info(f'Добавляем в AMO JSON WEEK новую неделю: {week}, год: {year}')\n week_deals = []\n \n #Пока у новых сделок сохраняется номер недели\n while dt.datetime.fromtimestamp(json_pack[0]['created_at']).isocalendar()[0] == year and dt.datetime.fromtimestamp(json_pack[0]['created_at']).isocalendar()[1] == week:\n week_deals = self._add_deal_to_json_deals(json_pack[0], week_deals)\n count_added_deals += 1\n result_json.append(week_deals[-1])\n json_pack.pop(0)\n #если сделок в пачке больше нет, сохраняем сделки в AMO JSON WEEK и завершаем функцию\n if len(json_pack) == 0:\n output_filename = week_json_filename + '_' + str(year) +'_'+ str(week).zfill(2) + '.json'\n with open(week_json_path + output_filename, 'w', encoding=\"utf8\") as output_file:\n json.dump(week_deals, output_file, ensure_ascii=False, indent=2)\n logger.info('Создан файл:' + output_filename)\n logger.info('Добавление пачки сделок завершено')\n logger.info(f'{count_added_deals} было добавлено')\n logger.info(f'{count_updated_deals} было обновлено')\n# result_json.append(week_deals)\n# result_json += week_deals\n print(len(result_json))\n return result_json\n \n\n #Если неделя в новой сделке отличаеся от той, с которой работали, то сохраняем сделки в AMO JSON WEEK и начинаем работу с новой неделей\n output_filename = week_json_filename + '_' + str(year) +'_'+ str(week).zfill(2) + '.json'\n with open(week_json_path + output_filename, 'w', encoding=\"utf8\") as output_file:\n json.dump(week_deals, output_file, ensure_ascii=False, indent=2)\n logger.info('Обновлён файл:' + output_filename)\n# result_json.append(week_deals)\n # result_json += week_deals\n# print(len(result_json))\n continue \n \n #Если в пачке кончились сделки - завершаем функцию\n return result_json\n\n def _update_deal_in_json_deals(self, 
new_deal, deals):\n #Находим индекс сделки в пачке WEEK_JSON\n deal_index = [old_deal['id'] for old_deal in deals].index(new_deal['id'])\n #Если дата изменения соответствующей сделки в AMO JSON WEEK не меньше даты изменения new_deal - ничего не меняем\n #Переставляем сделку в конец JSON\n if deals[deal_index]['updated_at'] >= new_deal['updated_at']:\n new_deal = deals[deal_index]\n deals.pop(deal_index)\n deals = self._add_deal_to_json_deals(new_deal, deals) \n \n #Если дата изменения новой сделки больше, а старая сделка содержит информацию о дате перехода в статус \"Не целевой\" (trash)\n # - заменяем старую сделку на новую, сохраная дату перехода в статус \"Не целевой\"\n elif 'trashed_at' in deals[deal_index]:\n new_deal['trashed_at'] = deals[deal_index]['trashed_at']\n deals.pop(deal_index)\n deals = self._add_deal_to_json_deals(new_deal, deals)\n \n #Иначе - просто заменяем старую сделку на новую\n else:\n deals.pop(deal_index)\n deals = self._add_deal_to_json_deals(new_deal, deals)\n\n return deals\n\n #добавляем сделку в базу AMO JSON WEEK\n def _add_deal_to_json_deals(self, new_deal, deals):\n #Если сделка в статусе \"Треш - нецелевые\", то добавляем дату последней модификации в поле ['trashed_at']\n if new_deal['status_id'] in self.CONFIG['AMO_TRASH_STATUSES_ID'] and not 'trashed_at' in new_deal:\n new_deal['trashed_at'] = new_deal['updated_at']\n #добавляем обновлённую сделку в конец списка сделок\n deals.append(new_deal)\n #deals += new_deal\n # print(len(deals))\n # output_filename = 'output_deals_' + str(new_deal['id'])\n # with open('output/' + output_filename + '.json', 'w', encoding=\"utf8\") as output_file:\n # json.dump(deals, output_file, ensure_ascii=False, indent=2)\n return deals\n\n #Разбираем данные по сделкам, скаченные из Амо в json и раскладываем их в файлы по неделям\n #Возвращаем json результата слияния\n def _put_deals_from_raw_json_to_week_json(self, raw_json_path, week_json_path, week_json_filename):\n ###Упорядочивание архива json-файлов\n ###Перепаковываем сделки в файлы, группирующие их по неделе создания\n ###Название файлов: week_json_filename_YYYY_WW.json\n ###Недели нумеруются по ISO\n ###первой неделей года считается неделя, содержащая первый четверг года, что эквивалентно следующим выражениям:\n ### неделя, содержащая 4 января;\n ### неделя, в которой 1 января это понедельник, вторник, среда или четверг;\n files_list = os.listdir(raw_json_path)\n for filename in files_list: \n with open(raw_json_path + filename, 'r', encoding=\"utf8\") as json_file:\n deals = json.load(json_file)\n deals.sort(key=operator.itemgetter('created_at'))\n #Добавляем новые сделки в AMO JSON WEEK\n updated_deals = self._merge_json_pack_to_json_week_deals(deals, week_json_path, week_json_filename)\n logger.info('загрузка в AMO JSON WEEK завершена')\n return updated_deals\n\n #Разбираем данные по сделкам, скаченные из Амо во временную папку json\n def _put_deals_from_temp_json_to_week_json(self, week_json_path):\n # и раскладываем их в файлы по неделям\n # используется при обновлении инфы в amo json week\n # возвращает список строк-сделок, которые были добавлены/обновлены\n # есть опасность, что итоговый список может получиться огромным, но \n # пусть пока будет так. 
До рефакторинга.\n # - основная функция при обновлении базы сделок из AmoCRM\n files_list = os.listdir(self.CONFIG['DEALS_TEMP_JSON_PATH'])\n result_json = {} #сюда собираем json с обновлёнными и добавленными сделками \n for filename in files_list: \n with open(self.CONFIG['DEALS_TEMP_JSON_PATH'] + filename, 'r', encoding=\"utf8\") as json_file:\n deals = json.load(json_file)\n deals.sort(key=operator.itemgetter('created_at'))\n #Добавляем новые сделки в AMO JSON WEEK \n result_json.append(self._merge_json_pack_to_json_week_deals(deals, week_json_path))\n rows = self.extract_amo_json_deals_to_rows(result_json)\n logger.info(f'Обновление AMO JSON WEEK завершено. Обновлено/добавлено {len(rows)} сделок')\n return rows\n \n #---- Внешние методы -----------\n # Скачать все сделки из AmoCRM\n def get_all_deals_from_crm(self):\n self._get_all_deals_ext_to_json(self.CONFIG['DEALS_RAW_JSON_PATH'], self.CONFIG['JSON_DEALS_RAW_FILENAME'])\n self._put_deals_from_raw_json_to_week_json(self.CONFIG['DEALS_RAW_JSON_PATH'], self.CONFIG['DEALS_WEEK_JSON_PATH'], self.CONFIG['DEALS_WEEK_JSON_FILENAME'])\n\n #скачать сделки, обновлённые и созданные после определённого DateTime\n #и загрузить их в базу\n def get_updated_deals_dataframe_from_crm(self, last_updated_at, head, pipelines_list):\n #head - список столбцов датафрейма\n #скачиваем обновления во временную папку\n self._get_updated_deals_since_timestamp_to_json_temp_folder(last_updated_at)\n #раскладываем скаченные сделки по неделям и получаем результат обновления\n updated_deals = self._put_deals_from_raw_json_to_week_json(self.CONFIG['DEALS_TEMP_JSON_PATH'], self.CONFIG['DEALS_WEEK_JSON_PATH'], self.CONFIG['DEALS_WEEK_JSON_FILENAME'])\n df = pd.DataFrame(columns = head)\n df = df.append(self._extract_amo_json_deals_to_dataframe(updated_deals, pipelines_list))\n #удаляем дубликаты записей\n result_df = df.drop_duplicates(keep='last')\n if len(result_df) < len(df):\n logger.log('df_errors.log', 'В датафрейме были обнаружены дубликаты. удалены.')\n return df\n \n # Получить датафрейм сделок созданных в определённую ISO-неделю\n def get_deals_dataframe_by_week(self, year, week_num, head, pipelines_list):\n filename = self.CONFIG['DEALS_WEEK_JSON_FILENAME'] + '_' + str(year) + '_' + str(week_num).zfill(2) + '.json'\n with open(self.CONFIG['DEALS_TEMP_JSON_PATH'] + filename, 'r', encoding=\"utf8\") as json_file:\n json_deals = json.load(json_file)\n df = pd.DataFrame(columns = head)\n df = df.append(self._extract_amo_json_deals_to_dataframe(json_deals, pipelines_list))\n return df\n\n # Получить датафрейм сделок созданных в определённый год\n def get_deals_dataframe_by_year(self, year, head, pipelines_list):\n \n files_list = os.listdir(self.CONFIG['DEALS_TEMP_JSON_PATH'])\n year_files_list = [file for file in files_list if ('_' + str(year) + '_') in file]\n rows = []\n for filename in year_files_list:\n with open(self.CONFIG['DEALS_TEMP_JSON_PATH'] + filename, 'r', encoding=\"utf8\") as json_file:\n json_deals = json.load(json_file)\n rows += self._extract_amo_json_deals_to_rows(json_deals, pipelines_list)\n df = pd.DataFrame(columns=head)\n df = df.append(pd.DataFrame(rows))\n #удаляем дубликаты записей\n result_df = df.drop_duplicates(keep='last')\n if len(result_df) < len(df):\n logger.log('df_errors.log', 'В датафрейме были обнаружены дубликаты. 
удалены.')\n return df\n\n #Получить датафрейм из файла json\n def get_deals_dataframe_from_file(self, path, filename, head, pipelines_list):\n with open(path + filename, 'r', encoding=\"utf8\") as json_file:\n json_deals = json.load(json_file)\n df = pd.DataFrame(columns = head)\n df = df.append(self._extract_amo_json_deals_to_dataframe(json_deals, pipelines_list))\n return df\n\n\n def get_deals_week_json_path(self):\n return self.CONFIG['DEALS_WEEK_JSON_PATH']\n \n ###=========================\n\n\n\n\n\n\nif __name__ == \"__main__\":\n \n week_parser = AmoCRM()\n temp_json_path = week_parser.CONFIG['DEALS_TEMP_JSON_PATH']\n week_json_path = week_parser.CONFIG['DEALS_WEEK_JSON_PATH']\n week_json_filename = week_parser.CONFIG['DEALS_WEEK_JSON_FILENAME']\n filename = 'temp_updated_leads_5.json'\n output_filename = 'output_json.json'\n with open(temp_json_path + filename, 'r', encoding=\"utf8\") as json_file:\n deals = json.load(json_file)\n\n\n# print(f'В TEMP JSON {len(deals)} сделок')\n #раскладываем сделки из временной директории по файлам JSON WEEK\n #упорядочеваем сделки в JSON_PACK - это сокращает количество итераций при проверке наличия сделки в базе\n# deals.sort(key=operator.itemgetter('created_at'))\n\n updated_deals = week_parser._merge_json_pack_to_json_week_deals(deals, week_json_path, week_json_filename)\n with open('output/' + output_filename, 'w', encoding=\"utf8\") as output_file:\n json.dump(updated_deals, output_file, ensure_ascii=False, indent=2)\n# print(f'В UPDATED JSON {len(updated_deals)} сделок')","sub_path":"AmoCRM.py","file_name":"AmoCRM.py","file_ext":"py","file_size_in_byte":46266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"145988055","text":"#!/usr/bin/env python\n\nimport networkx as nx\nfrom networkx.drawing.nx_pydot import pydot_layout\nfrom networkx import DiGraph, draw_networkx\nimport matplotlib.pyplot as plt\n\nimport io, re\n\n\ng = nx.DiGraph()\ng.add_edge(1,2)\ng.add_edge(1,3)\ng.add_edge(2,3)\ng.add_edge(3,4)\n\npos = pydot_layout(g, prog='dot')\ndraw_networkx(g, pos=pos)\n\ndata = io.StringIO()\nplt.savefig(data, format='svg')\nplt.clf()\nplt.cla()\nplt.close()\n\nre_svg_begin_and_rest = re.compile(r'^.*?()(.*)', re.MULTILINE|re.DOTALL)\nre_width = re.compile('^( 500:\n break\ndf = pd.concat(dfs)\nprint(len(df.index))\n\nprint(df.dtypes)\ndf['date'] = pd.to_datetime(df['date'], format='%Y/%m/%d')\n#df['vol'] = df['vol'].astype('u8')\nprint(df.dtypes)\n\nformatter = '{0:<25}{1}'\n# write\nDataRWTest.test_hdf_fixed_write(df)\n'''\nprint('write')\nstart = time.time()\nDataRWTest.test_sql_write(df)\nend = time.time()\nprint(formatter.format('test_sql_write', end - start))\n\nstart = time.time()\nDataRWTest.test_hdf_fixed_write(df)\nend = time.time()\nprint(formatter.format('test_hdf_fixed_write', end - start))\n\nstart = time.time()\nDataRWTest.test_hdf_table_write(df)\nend = time.time()\nprint(formatter.format('test_hdf_table_write', end - start))\n\nstart = time.time()\nDataRWTest.test_csv_write(df)\nend = time.time()\nprint(formatter.format('test_csv_write', end - start))\n'''\n# read\n'''\nprint('read')\nstart = time.time()\nDataRWTest.test_sql_read()\nend = time.time()\nprint(formatter.format('test_sql_read', end - start))\n\nstart = time.time()\nDataRWTest.test_hdf_fixed_read()\nend = time.time()\nprint(formatter.format('test_hdf_fixed_read', end - start))\n\nstart = time.time()\nDataRWTest.test_hdf_table_read()\nend = time.time()\nprint(formatter.format('test_hdf_table_read', end - 
start))\n\nstart = time.time()\nDataRWTest.test_csv_read()\nend = time.time()\nprint(formatter.format('test_csv_read', end - start))\n'''\n","sub_path":"test/datarw.py","file_name":"datarw.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"639827612","text":"import json\nfrom random import randint\nimport random\nimport datetime\nimport time\nimport csv\n\nimport numpy\nfrom numpy import arange, array, ones\nfrom scipy import stats\n\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.utils.six.moves import range\nfrom django.http import StreamingHttpResponse\n\nfrom .models import User\nfrom .models import HoltLaury\nfrom .models import Gamble\nfrom .models import Investment\nfrom .models import Pretest\nfrom .models import Training\nfrom .models import Thankyou\n\n@ensure_csrf_cookie\ndef login(request):\n if request.method == 'POST':\n if 'username' in request.POST and request.POST['username'] != '':\n umid = request.POST['username']\n user, created = User.objects.get_or_create(username=umid)\n request.session['umid'] = user.username\n\n return welcome(request)\n\ndef logout(request):\n del request.session['umid']\n return welcome(request)\n\ndef welcome(request):\n if ('REMOTE_USER' in request.META or request.session.get('umid', False)):\n if ('REMOTE_USER' in request.META):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False)):\n umid = request.session['umid']\n user, created = User.objects.get_or_create(username=umid)\n user.version = \"AfterExperiment\"\n user.save()\n request.session['startedStudy'] = datetime.datetime.now().strftime(\"%b %d %Y %I:%M:%S %p\")\n else:\n umid = \"\"\n context = { 'umid': umid, 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\n@ensure_csrf_cookie\ndef pretest(request, question):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n request.session['started'] = datetime.datetime.now().strftime(\"%b %d %Y %I:%M:%S %p\")\n user = User.objects.get(username=umid)\n if user.pretest_set.count() != 0:\n if question == \"\":\n question = \"1\"\n question = int(question)\n pretest = user.pretest_set.all()[0]\n for i in range(1, question + 1):\n if i == 1:\n answer = pretest.question1\n correct = pretest.correct1\n elif i == 2:\n answer = pretest.question2\n correct = pretest.correct2\n elif i == 3:\n answer = pretest.question3\n correct = pretest.correct3\n elif i == 4:\n answer = pretest.question4\n correct = pretest.correct4\n elif i == 5:\n answer = pretest.question5\n correct = pretest.correct5\n elif i == 6:\n answer = pretest.question6\n correct = pretest.correct6\n elif i == 7:\n answer = pretest.question7\n correct = pretest.correct7\n if answer == -1:\n question = i\n context = { 'umid': umid, 'answer':answer, 'question':question, 'welcomepage':1 }\n return render(request, 'games/Pretest.html', context)\n context = { 'umid': umid, 'answer':answer, 'question':question, 'correct':correct, 'welcomepage':1 }\n return render(request, 
'games/Pretest.html', context)\n\n context = { 'umid': umid, 'question':1, 'welcomepage':1 }\n return render(request, 'games/Pretest.html', context)\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef pretestsubmit(request):\n if request.method == 'POST':\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('answer' in request.POST and request.POST['answer'] != '' and\n 'questionclicked' in request.POST and request.POST['questionclicked'] != '' and\n 'questionrightclicked' in request.POST and request.POST['questionrightclicked'] != '' and\n 'questionhovered' in request.POST and request.POST['questionhovered'] != '' and\n 'questionhoveredseconds' in request.POST and request.POST['questionhoveredseconds'] != '' and\n 'question' in request.POST and request.POST['question'] != ''):\n \n answer = int(request.POST['answer'])\n questionclicked = int(request.POST['questionclicked'])\n questionrightclicked = int(request.POST['questionrightclicked'])\n questionhovered = int(request.POST['questionhovered'])\n questionhoveredseconds = float(request.POST['questionhoveredseconds'])\n question = int(request.POST['question'])\n user = User.objects.get(username=umid)\n pretest = None\n if user.pretest_set.count() != 0:\n pretest = user.pretest_set.all()[0]\n if question == 1:\n if answer == 0:\n correct1 = 1\n else:\n correct1 = 2\n if pretest == None:\n user.pretest_set.create(\n question1=answer,\n correct1=correct1,\n questionclicked1=questionclicked,\n questionrightclicked1=questionrightclicked,\n questionhovered1=questionhovered,\n questionhoveredseconds1=questionhoveredseconds,\n startedquestion1=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion1=datetime.datetime.now())\n return redirect('../pretest/2/')\n if question == 2:\n if answer == 1:\n correct2 = 1\n else:\n correct2 = 2\n if pretest.question2 == -1:\n user.pretest_set.update(\n question2=answer,\n correct2=correct2,\n questionclicked2=questionclicked,\n questionrightclicked2=questionrightclicked,\n questionhovered2=questionhovered,\n questionhoveredseconds2=questionhoveredseconds,\n startedquestion2=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion2=datetime.datetime.now())\n return redirect('../pretest/3/')\n if question == 3:\n if answer == 0:\n correct3 = 1\n else:\n correct3 = 2\n if pretest.question3 == -1:\n user.pretest_set.update(\n question3=answer,\n correct3=correct3,\n questionclicked3=questionclicked,\n questionrightclicked3=questionrightclicked,\n questionhovered3=questionhovered,\n questionhoveredseconds3=questionhoveredseconds,\n startedquestion3=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion3=datetime.datetime.now())\n return redirect('../pretest/4/')\n if question == 4:\n if answer == 0:\n correct4 = 1\n else:\n correct4 = 2\n if pretest.question4 == -1:\n user.pretest_set.update(\n question4=answer,\n correct4=correct4,\n questionclicked4=questionclicked,\n questionrightclicked4=questionrightclicked,\n questionhovered4=questionhovered,\n 
questionhoveredseconds4=questionhoveredseconds,\n startedquestion4=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion4=datetime.datetime.now())\n return redirect('../pretest/5/')\n if question == 5:\n if answer == 0:\n correct5 = 1\n else:\n correct5 = 2\n if pretest.question5 == -1:\n user.pretest_set.update(\n question5=answer,\n correct5=correct5,\n questionclicked5=questionclicked,\n questionrightclicked5=questionrightclicked,\n questionhovered5=questionhovered,\n questionhoveredseconds5=questionhoveredseconds,\n startedquestion5=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion5=datetime.datetime.now())\n return redirect('../pretest/6/')\n if question == 6:\n if answer == 0:\n correct6 = 1\n else:\n correct6 = 2\n if pretest.question6 == -1:\n user.pretest_set.update(\n question6=answer,\n correct6=correct6,\n questionclicked6=questionclicked,\n questionrightclicked6=questionrightclicked,\n questionhovered6=questionhovered,\n questionhoveredseconds6=questionhoveredseconds,\n startedquestion6=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion6=datetime.datetime.now())\n return redirect('../pretest/7/')\n if question == 7:\n if answer == 1:\n correct7 = 1\n else:\n correct7 = 2\n if pretest.question7 == -1:\n user.pretest_set.update(\n question7=answer,\n correct7=correct7,\n questionclicked7=questionclicked,\n questionrightclicked7=questionrightclicked,\n questionhovered7=questionhovered,\n questionhoveredseconds7=questionhoveredseconds,\n startedquestion7=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion7=datetime.datetime.now())\n return redirect('../pretest/results')\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef pretestresults(request):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n user = User.objects.get(username=umid)\n if user.pretest_set.count() != 0:\n question = 7\n answers = [-1]*8\n pretest = user.pretest_set.all()[0]\n correct = [-1]*8\n for i in range(1, question + 1):\n if i == 1:\n answer = pretest.question1\n answers[1] = pretest.question1\n correct[1] = pretest.correct1\n elif i == 2:\n answer = pretest.question2\n answers[2] = pretest.question2\n correct[2] = pretest.correct2\n elif i == 3:\n answer = pretest.question3\n answers[3] = pretest.question3\n correct[3] = pretest.correct3\n elif i == 4:\n answer = pretest.question4\n answers[4] = pretest.question4\n correct[4] = pretest.correct4\n elif i == 5:\n answer = pretest.question5\n answers[5] = pretest.question5\n correct[5] = pretest.correct5\n elif i == 6:\n answer = pretest.question6\n answers[6] = pretest.question6\n correct[6] = pretest.correct6\n elif i == 7:\n answer = pretest.question7\n answers[7] = pretest.question7\n correct[7] = pretest.correct7\n if answer == -1:\n question = i\n context = { 'umid': umid, 'answer':answer, 'question':question, 'welcomepage':1 }\n return render(request, 'games/Pretest.html', context)\n correctResult = 0\n wrongResult = 0\n for i in range(1, 8):\n if correct[i] == 1:\n correctResult = 
correctResult + 1\n elif correct[i] == 2:\n wrongResult = wrongResult + 1\n context = { 'umid': umid, 'answers':answers, 'question':question, 'correct':correct, \n 'correctResult':correctResult, 'wrongResult':wrongResult, 'welcomepage':1 }\n return render(request, 'games/Pretest Results.html', context)\n\n return redirect('../1/')\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef trainingwelcome(request):\n if ('REMOTE_USER' in request.META or request.session.get('umid', False)):\n if ('REMOTE_USER' in request.META):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False)):\n umid = request.session['umid']\n else:\n umid = \"\"\n context = { 'umid': umid }\n return render(request, 'games/Training Welcome.html', context)\n\ndef trainingintro(request, question):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n request.session['started'] = datetime.datetime.now().strftime(\"%b %d %Y %I:%M:%S %p\")\n user = User.objects.get(username=umid)\n if user.training_set.count() != 0:\n if question == \"\":\n question = \"1\"\n question = int(question)\n context = { 'umid': umid, 'question':question, 'welcomepage': 1 }\n return render(request, 'games/Training Intro.html', context)\n\n context = { 'umid': umid, 'question':1, 'welcomepage': 1 }\n return render(request, 'games/Training Intro.html', context)\n\n context = { 'umid': '', 'welcomepage': 1, 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\n@ensure_csrf_cookie\ndef training(request, question):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n user = User.objects.get(username=umid)\n if user.training_set.count() != 0:\n if question == \"\":\n question = \"1\"\n question = int(question)\n training = user.training_set.all()[0]\n for i in range(1, question + 1):\n if i == 1:\n answer = training.question1\n correct = training.correct1\n elif i == 2:\n answer = training.question2\n correct = training.correct2\n elif i == 3:\n answer = training.question3\n correct = training.correct3\n elif i == 4:\n answer = training.question4\n correct = training.correct4\n if answer == -1:\n question = i\n context = { 'umid': umid, 'answer':answer, 'question':question, 'welcomepage': 1 }\n return render(request, 'games/Training.html', context)\n context = { 'umid': umid, 'answer':answer, 'question':question, 'correct':correct, 'welcomepage': 1 }\n return render(request, 'games/Training.html', context)\n\n context = { 'umid': umid, 'question':1, 'welcomepage': 1 }\n return render(request, 'games/Training.html', context)\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef trainingsubmit(request):\n if request.method == 'POST':\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', 
False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('answer' in request.POST and request.POST['answer'] != '' and\n 'questionclicked' in request.POST and request.POST['questionclicked'] != '' and\n 'questionrightclicked' in request.POST and request.POST['questionrightclicked'] != '' and\n 'questionhovered' in request.POST and request.POST['questionhovered'] != '' and\n 'questionhoveredseconds' in request.POST and request.POST['questionhoveredseconds'] != '' and\n 'question' in request.POST and request.POST['question'] != ''):\n \n answer = int(request.POST['answer'])\n questionclicked = int(request.POST['questionclicked'])\n questionrightclicked = int(request.POST['questionrightclicked'])\n questionhovered = int(request.POST['questionhovered'])\n questionhoveredseconds = float(request.POST['questionhoveredseconds'])\n question = int(request.POST['question'])\n user = User.objects.get(username=umid)\n training = None\n if user.training_set.count() != 0:\n training = user.training_set.all()[0]\n if question == 1:\n if answer == 1:\n correct1 = 1\n else:\n correct1 = 2\n if training == None:\n user.training_set.create(\n question1=answer,\n correct1=correct1,\n questionclicked1=questionclicked,\n questionrightclicked1=questionrightclicked,\n questionhovered1=questionhovered,\n questionhoveredseconds1=questionhoveredseconds,\n startedquestion1=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion1=datetime.datetime.now())\n return redirect('../training/1/')\n if question == 2:\n if answer == 1:\n correct2 = 1\n else:\n correct2 = 2\n if training.question2 == -1:\n user.training_set.update(\n question2=answer,\n correct2=correct2,\n questionclicked2=questionclicked,\n questionrightclicked2=questionrightclicked,\n questionhovered2=questionhovered,\n questionhoveredseconds2=questionhoveredseconds,\n startedquestion2=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion2=datetime.datetime.now())\n return redirect('../training/2/')\n if question == 3:\n if answer == 0:\n correct3 = 1\n else:\n correct3 = 2\n if training.question3 == -1:\n user.training_set.update(\n question3=answer,\n correct3=correct3,\n questionclicked3=questionclicked,\n questionrightclicked3=questionrightclicked,\n questionhovered3=questionhovered,\n questionhoveredseconds3=questionhoveredseconds,\n startedquestion3=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion3=datetime.datetime.now())\n return redirect('../training/3/')\n if question == 4:\n if answer == 1:\n correct4 = 1\n else:\n correct4 = 2\n if training.question4 == -1:\n user.training_set.update(\n question4=answer,\n correct4=correct4,\n questionclicked4=questionclicked,\n questionrightclicked4=questionrightclicked,\n questionhovered4=questionhovered,\n questionhoveredseconds4=questionhoveredseconds,\n startedquestion4=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedquestion4=datetime.datetime.now())\n return redirect('../training/4/')\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef trainingthankyou(request):\n if ('REMOTE_USER' in request.META or request.session.get('umid', False)):\n if ('REMOTE_USER' 
in request.META):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False)):\n umid = request.session['umid']\n else:\n umid = \"\"\n context = { 'umid': umid, 'welcomepage': 1, 'trainingThankYou': 1 }\n return render(request, 'games/Training Thankyou.html', context)\n\ndef trainingfinal(request):\n if ('REMOTE_USER' in request.META or request.session.get('umid', False)):\n if ('REMOTE_USER' in request.META):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False)):\n umid = request.session['umid']\n else:\n umid = \"\"\n context = { 'umid': umid, 'welcomepage': 1 }\n return render(request, 'games/Training Final.html', context)\n\ndef consent(request):\n if ('REMOTE_USER' in request.META or request.session.get('umid', False)):\n if ('REMOTE_USER' in request.META):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False)):\n umid = request.session['umid']\n else:\n umid = \"\"\n context = { 'umid': umid, 'welcomepage': 1 }\n return render(request, 'games/Consent.html', context)\n\n# def training(request):\n# try:\n# if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n# (request.session.get('umid', False) and request.session['umid'] != \"\")):\n# if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n# umid = request.META['REMOTE_USER']\n# if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n# umid = request.session['umid']\n# else:\n# umid = \"\"\n# context = { 'umid': umid }\n# # return render(request, 'games/Training.html', context)\n# return redirect('/static/games/Phishing_Game_Keyboard_Accessible/index.html')\n# except:\n# context = { 'umid': '', 'welcomepage': 1 }\n# return render(request, 'games/Welcome.html', context)\n\ndef gameselection(request):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n user = User.objects.get(username=umid)\n if user.firstgame == \"\":\n randNum = randint(1, 6)\n if randNum == 1:\n user.firstgame = \"lottery\"\n user.secondgame = \"investment\"\n user.thirdgame = \"gamble\"\n elif randNum == 2:\n user.firstgame = \"lottery\"\n user.secondgame = \"gamble\"\n user.thirdgame = \"investment\"\n elif randNum == 3:\n user.firstgame = \"gamble\"\n user.secondgame = \"investment\"\n user.thirdgame = \"lottery\"\n elif randNum == 4:\n user.firstgame = \"gamble\"\n user.secondgame = \"lottery\"\n user.thirdgame = \"investment\"\n elif randNum == 5:\n user.firstgame = \"investment\"\n user.secondgame = \"lottery\"\n user.thirdgame = \"gamble\"\n elif randNum == 6:\n user.firstgame = \"investment\"\n user.secondgame = \"gamble\"\n user.thirdgame = \"lottery\"\n user.optout = False\n user.postpone = False\n user.save()\n if user.firstgame == \"lottery\":\n return redirect('../lottery')\n elif user.firstgame == \"investment\":\n return redirect('../investment')\n return redirect('../gamble')\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef nextgame(request, current):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if 
('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n user = User.objects.get(username=umid)\n if user.firstgame != \"\":\n if user.firstgame == \"lottery\" and user.secondgame == \"gamble\" and user.thirdgame == \"investment\":\n if current == \"lottery\":\n return redirect('../../gamble')\n if current == \"gamble\":\n return redirect('../../investment')\n if current == \"investment\":\n return redirect('../../thankyou')\n if user.firstgame == \"investment\" and user.secondgame == \"gamble\" and user.thirdgame == \"lottery\":\n if current == \"investment\":\n return redirect('../../gamble')\n if current == \"gamble\":\n return redirect('../../lottery')\n if current == \"lottery\":\n return redirect('../../thankyou')\n if user.firstgame == \"gamble\" and user.secondgame == \"lottery\" and user.thirdgame == \"investment\":\n if current == \"gamble\":\n return redirect('../../lottery')\n if current == \"lottery\":\n return redirect('../../investment')\n if current == \"investment\":\n return redirect('../../thankyou')\n if user.firstgame == \"gamble\" and user.secondgame == \"investment\" and user.thirdgame == \"lottery\":\n if current == \"gamble\":\n return redirect('../../investment')\n if current == \"investment\":\n return redirect('../../lottery')\n if current == \"lottery\":\n return redirect('../../thankyou')\n if user.firstgame == \"lottery\" and user.secondgame == \"investment\" and user.thirdgame == \"gamble\":\n if current == \"lottery\":\n return redirect('../../investment')\n if current == \"investment\":\n return redirect('../../gamble')\n if current == \"gamble\":\n return redirect('../../thankyou')\n if user.firstgame == \"investment\" and user.secondgame == \"lottery\" and user.thirdgame == \"gamble\":\n if current == \"investment\":\n return redirect('../../lottery')\n if current == \"lottery\":\n return redirect('../../gamble')\n if current == \"gamble\":\n return redirect('../../thankyou')\n else:\n return redirect('../../gameselection')\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\n@ensure_csrf_cookie\ndef lottery(request):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n request.session['started'] = datetime.datetime.now().strftime(\"%b %d %Y %I:%M:%S %p\")\n user = User.objects.get(username=umid)\n gameNum = 1\n if user.firstgame == \"lottery\":\n gameNum = 1\n elif user.secondgame == \"lottery\":\n gameNum = 2\n elif user.thirdgame == \"lottery\":\n gameNum = 3\n if user.holtlaury_set.count() != 0:\n holtLaury = user.holtlaury_set.all()[0]\n decision = holtLaury.decision\n die = [None]*11\n die[1] = holtLaury.die1\n die[2] = holtLaury.die2\n die[3] = holtLaury.die3\n die[4] = holtLaury.die4\n die[5] = holtLaury.die5\n die[6] = holtLaury.die6\n die[7] = holtLaury.die7\n die[8] = holtLaury.die8\n die[9] = holtLaury.die9\n die[10] = holtLaury.die10\n option = [None]*11\n option[1] = holtLaury.option1\n option[2] = holtLaury.option2\n option[3] = holtLaury.option3\n option[4] = holtLaury.option4\n option[5] = 
holtLaury.option5\n option[6] = holtLaury.option6\n option[7] = holtLaury.option7\n option[8] = holtLaury.option8\n option[9] = holtLaury.option9\n option[10] = holtLaury.option10\n result = holtLaury.points\n originalPoints = holtLaury.originalPoints\n willingnessNum = holtLaury.willingness\n willingnessRand = holtLaury.willingnessRand\n context = { 'umid': umid, 'decision':decision, 'die':die,\n 'option':option, 'result':result,\n 'originalPoints':originalPoints, 'willingnessNum':willingnessNum,\n 'willingnessRand':willingnessRand, 'gameNum':gameNum }\n return render(request, 'games/Holt-Laury Lottery.html', context)\n \n context = { 'umid': umid, 'gameNum':gameNum }\n return render(request, 'games/Holt-Laury Lottery.html', context)\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef lotterySubmit(request):\n if request.method == 'POST':\n requestPost = json.loads(request.body.decode('utf-8'))\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('DecisionsOption' in requestPost and requestPost['DecisionsOption'] != ''):\n DecisionsOption = requestPost['DecisionsOption']\n user = User.objects.get(username=umid)\n\n decision = randint(1, 10)\n die = [None]*11\n result = 0\n\n for index in range(1, 11): \n die[index] = randint(1, 10)\n if decision == index:\n if DecisionsOption[decision] == 0:\n if die[index] <= decision:\n result = 5\n else:\n result = 4\n else:\n if die[index] <= decision:\n result = 10\n else:\n result = 1\n \n result = round(result, 2)\n if user.holtlaury_set.count() == 0:\n user.holtlaury_set.create(decision=decision,\n option1=DecisionsOption[1], option2=DecisionsOption[2],\n option3=DecisionsOption[3], option4=DecisionsOption[4],\n option5=DecisionsOption[5], option6=DecisionsOption[6],\n option7=DecisionsOption[7], option8=DecisionsOption[8],\n option9=DecisionsOption[9], option10=DecisionsOption[10],\n die1=die[1], die2=die[2],\n die3=die[3], die4=die[4],\n die5=die[5], die6=die[6],\n die7=die[7], die8=die[8],\n die9=die[9], die10=die[10],\n points=result, originalPoints=result,\n started=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finished=datetime.datetime.now())\n\n return JsonResponse({ 'decision':decision, \n 'die':die[decision], 'result':result })\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef lotteryWillingness(request):\n if request.method == 'POST':\n requestPost = json.loads(request.body.decode('utf-8'))\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('willingnessNum' in requestPost and requestPost['willingnessNum'] != ''):\n willingnessNum = round(requestPost['willingnessNum'], 2)\n user = User.objects.get(username=umid)\n\n willingnessRand = round(randint(0, int((4 - 0) / 0.01)) * 0.01 + 0, 2)\n\n holtLaury = 
user.holtlaury_set.all()[0]\n result = round(holtLaury.points, 2)\n originalPoints = round(holtLaury.originalPoints, 2)\n if willingnessRand <= willingnessNum:\n result = round(holtLaury.points, 2) - willingnessRand\n decision = holtLaury.decision\n\n die = [None]*11\n die[1] = holtLaury.die1\n die[2] = holtLaury.die2\n die[3] = holtLaury.die3\n die[4] = holtLaury.die4\n die[5] = holtLaury.die5\n die[6] = holtLaury.die6\n die[7] = holtLaury.die7\n die[8] = holtLaury.die8\n die[9] = holtLaury.die9\n die[10] = holtLaury.die10\n option = [None]*11\n option[1] = holtLaury.option1\n option[2] = holtLaury.option2\n option[3] = holtLaury.option3\n option[4] = holtLaury.option4\n option[5] = holtLaury.option5\n option[6] = holtLaury.option6\n option[7] = holtLaury.option7\n option[8] = holtLaury.option8\n option[9] = holtLaury.option9\n option[10] = holtLaury.option10\n\n user.holtlaury_set.update(willingness=willingnessNum, willingnessRand=willingnessRand,\n points=result)\n\n return JsonResponse({ 'decision':decision, 'willingnessRand':willingnessRand,\n 'option':option, 'die':die, 'result':result, 'originalPoints':originalPoints })\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\n@ensure_csrf_cookie\ndef gamble(request):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n request.session['started'] = datetime.datetime.now().strftime(\"%b %d %Y %I:%M:%S %p\")\n user = User.objects.get(username=umid)\n gameNum = 1\n if user.firstgame == \"gamble\":\n gameNum = 1\n elif user.secondgame == \"gamble\":\n gameNum = 2\n elif user.thirdgame == \"gamble\":\n gameNum = 3\n if user.gamble_set.count() != 0:\n gamble = user.gamble_set.all()[0]\n chosen = gamble.chosen\n coin = [None]*10\n coin[1] = gamble.coin1\n coin[2] = gamble.coin2\n coin[3] = gamble.coin3\n coin[4] = gamble.coin4\n coin[5] = gamble.coin5\n coin[6] = gamble.coin6\n coin[7] = gamble.coin7\n coin[8] = gamble.coin8\n coin[9] = gamble.coin9\n result = round(gamble.points, 2)\n originalPoints = round(gamble.originalPoints, 2)\n willingnessNum = gamble.willingness\n willingnessRand = gamble.willingnessRand\n context = { 'umid': umid, 'chosen':chosen, 'coin':coin, 'result':result,\n 'originalPoints':originalPoints, 'willingnessNum':willingnessNum,\n 'willingnessRand':willingnessRand, 'gameNum':gameNum }\n return render(request, 'games/Eckel-Grossman Gamble.html', context)\n \n context = { 'umid': umid, 'gameNum':gameNum }\n return render(request, 'games/Eckel-Grossman Gamble.html', context)\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef gambleSubmit(request):\n if request.method == 'POST':\n requestPost = json.loads(request.body.decode('utf-8'))\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('chosen' in requestPost and requestPost['chosen'] != ''):\n chosen = 
requestPost['chosen']\n user = User.objects.get(username=umid)\n\n coin = [None]*11\n result = 0\n\n for index in range(1, 10): \n coin[index] = random.getrandbits(1)\n if chosen == index:\n if coin[index]:\n result = 4 + 1 * (index - 1)\n else:\n result = 4 - 0.5 * (index - 1)\n \n result = round(result, 2)\n if user.gamble_set.count() == 0:\n user.gamble_set.create(chosen=chosen,\n coin1=coin[1], coin2=coin[2],\n coin3=coin[3], coin4=coin[4],\n coin5=coin[5], coin6=coin[6],\n coin7=coin[7], coin8=coin[8],\n coin9=coin[9], points=result, originalPoints=result,\n started=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finished=datetime.datetime.now())\n\n return JsonResponse({ 'chosen':chosen, \n 'coin':coin[chosen], 'result':result })\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef gambleWillingness(request):\n if request.method == 'POST':\n requestPost = json.loads(request.body.decode('utf-8'))\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('willingnessNum' in requestPost and requestPost['willingnessNum'] != ''):\n willingnessNum = round(requestPost['willingnessNum'], 2)\n user = User.objects.get(username=umid)\n\n willingnessRand = round(randint(0, int((1 - 0) / 0.01)) * 0.01 + 0, 2)\n\n gamble = user.gamble_set.all()[0]\n result = round(gamble.points, 2)\n if willingnessRand <= willingnessNum:\n result = round(gamble.points, 2) - willingnessRand\n chosen = gamble.chosen\n\n coin = [None]*10\n coin[1] = gamble.coin1\n coin[2] = gamble.coin2\n coin[3] = gamble.coin3\n coin[4] = gamble.coin4\n coin[5] = gamble.coin5\n coin[6] = gamble.coin6\n coin[7] = gamble.coin7\n coin[8] = gamble.coin8\n coin[9] = gamble.coin9\n\n user.gamble_set.update(willingness=willingnessNum, willingnessRand=willingnessRand,\n points=result)\n\n return JsonResponse({ 'chosen':chosen, 'willingnessRand':willingnessRand,\n 'coin':coin, 'result':result })\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\n@ensure_csrf_cookie\ndef investment(request):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n request.session['started'] = datetime.datetime.now().strftime(\"%b %d %Y %I:%M:%S %p\")\n user = User.objects.get(username=umid)\n gameNum = 1\n if user.firstgame == \"investment\":\n gameNum = 1\n elif user.secondgame == \"investment\":\n gameNum = 2\n elif user.thirdgame == \"investment\":\n gameNum = 3\n if user.investment_set.count() != 0:\n investment = user.investment_set.all()[0]\n invested = investment.invested\n\n if investment.otherreturned != -1 or investment.otherinvested != -1:\n context = { 'umid': umid, 'invested':invested, 'gameNum':gameNum, 'finished':1 }\n return render(request, 'games/Trust Game.html', context)\n\n context = { 'umid': umid, 'invested':invested, 'gameNum':gameNum }\n return 
render(request, 'games/Trust Game.html', context)\n \n context = { 'umid': umid, 'gameNum':gameNum }\n return render(request, 'games/Trust Game.html', context)\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef investmentSubmit(request):\n if request.method == 'POST':\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('invested' in request.POST and request.POST['invested'] != ''):\n invested = request.POST['invested']\n user = User.objects.get(username=umid)\n\n if user.investment_set.count() == 0:\n user.investment_set.create(invested=invested,\n startedinvested=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedinvested=datetime.datetime.now())\n else:\n user.investment_set.update(invested=invested,\n startedinvested=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedinvested=datetime.datetime.now())\n\n return redirect('../returning/2/')\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\n@ensure_csrf_cookie\ndef returned(request, part):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n request.session['started'] = datetime.datetime.now().strftime(\"%b %d %Y %I:%M:%S %p\")\n user = User.objects.get(username=umid)\n gameNum = 1\n if user.firstgame == \"investment\":\n gameNum = 1\n elif user.secondgame == \"investment\":\n gameNum = 2\n elif user.thirdgame == \"investment\":\n gameNum = 3\n if user.investment_set.count() != 0:\n if part == \"\":\n return redirect('../investment')\n part = int(part)\n if part >= 2 and part <= 7:\n investment = user.investment_set.all()[0]\n for i in range(2, part + 1):\n if i == 2:\n returned = investment.returned0\n elif i == 3:\n returned = investment.returned1\n elif i == 4:\n returned = investment.returned2\n elif i == 5:\n returned = investment.returned3\n elif i == 6:\n returned = investment.returned4\n elif i == 7:\n returned = investment.returned5\n if returned == -1:\n part = i\n context = { 'umid': umid, 'returned':0, 'part':part, 'gameNum':gameNum }\n return render(request, 'games/Trust Game.html', context)\n if investment.otherreturned != -1 or investment.otherinvested != -1:\n context = { 'umid': umid, 'returned':returned, 'part':part, 'gameNum':gameNum, 'finished':1 }\n return render(request, 'games/Trust Game.html', context)\n context = { 'umid': umid, 'returned':returned, 'part':part, 'gameNum':gameNum }\n return render(request, 'games/Trust Game.html', context)\n else:\n return redirect('../investment')\n\n context = { 'umid': umid, 'gameNum':gameNum }\n return render(request, 'games/Trust Game.html', context)\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef returnedSubmit(request):\n if request.method == 'POST':\n if 
(('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('returned' in request.POST and request.POST['returned'] != '' and\n 'part' in request.POST and request.POST['part'] != ''):\n returned = int(request.POST['returned'])\n part = int(request.POST['part'])\n user = User.objects.get(username=umid)\n if part == 2:\n user.investment_set.update(\n returned0=returned,\n startedreturned0=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedreturned0=datetime.datetime.now())\n return redirect('../returning/3/')\n if part == 3:\n user.investment_set.update(\n returned1=returned,\n startedreturned1=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedreturned1=datetime.datetime.now())\n return redirect('../returning/4/')\n if part == 4:\n user.investment_set.update(\n returned2=returned,\n startedreturned2=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedreturned2=datetime.datetime.now())\n return redirect('../returning/5/')\n if part == 5:\n user.investment_set.update(\n returned3=returned,\n startedreturned3=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedreturned3=datetime.datetime.now())\n return redirect('../returning/6/')\n if part == 6:\n user.investment_set.update(\n returned4=returned,\n startedreturned4=datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p'),\n finishedreturned4=datetime.datetime.now())\n return redirect('../returning/7/')\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\n@ensure_csrf_cookie\ndef final(request):\n if request.method == 'POST':\n requestPost = json.loads(request.body.decode('utf-8'))\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('returned' in requestPost and requestPost['returned'] != ''):\n returned = int(requestPost['returned'])\n part = 7\n user = User.objects.get(username=umid)\n gameNum = 1\n if user.firstgame == \"investment\":\n gameNum = 1\n elif user.secondgame == \"investment\":\n gameNum = 2\n elif user.thirdgame == \"investment\":\n gameNum = 3\n if user.investment_set.count() != 0:\n investment = user.investment_set.all()[0]\n if investment.otherreturned == -1 and investment.otherinvested == -1:\n for i in range(2, part + 1):\n if i == 2:\n returned = investment.returned0\n elif i == 3:\n returned = investment.returned1\n elif i == 4:\n returned = investment.returned2\n elif i == 5:\n returned = investment.returned3\n elif i == 6:\n returned = investment.returned4\n elif i == 7:\n returned = int(requestPost['returned'])\n investment.returned5 = returned\n investment.startedreturned5 = datetime.datetime.strptime(request.session['started'], '%b %d %Y %I:%M:%S %p')\n investment.finishedreturned5 = datetime.datetime.now()\n investment.save()\n if 
returned == -1:\n part = i\n context = { 'umid': umid, 'returned':returned, 'part':part, 'gameNum':gameNum }\n return render(request, 'games/Trust Game.html', context)\n \n otherPlayer = None\n otherPlayersComparison = Investment.objects.filter(otherreturned=-1).filter(otherinvested=-1)\n otherPlayers = Investment.objects.filter(user__version='Pilot').exclude(user=user).order_by('?')\n for other in otherPlayers:\n if other.invested != -1 and other.returned5 != -1 and not other in otherPlayersComparison:\n otherPlayer = other\n break\n if otherPlayer == None:\n return JsonResponse({ 'found':0 })\n InvestOrReturn = random.getrandbits(1)\n if InvestOrReturn:\n investAmount = investment.invested\n if investAmount == 0:\n returnAmount = otherPlayer.returned0\n elif investAmount == 1:\n returnAmount = otherPlayer.returned1\n elif investAmount == 2:\n returnAmount = otherPlayer.returned2\n elif investAmount == 3:\n returnAmount = otherPlayer.returned3\n elif investAmount == 4:\n returnAmount = otherPlayer.returned4\n elif investAmount == 5:\n returnAmount = otherPlayer.returned5\n investment.otherreturned = returnAmount\n # otherPlayer.otherinvested = investAmount\n\n investment.points = 5 - investAmount + returnAmount\n # otherPlayer.points = 5 + (3 * investAmount) - returnAmount\n\n else:\n investAmount = otherPlayer.invested\n if investAmount == 0:\n returnAmount = investment.returned0\n elif investAmount == 1:\n returnAmount = investment.returned1\n elif investAmount == 2:\n returnAmount = investment.returned2\n elif investAmount == 3:\n returnAmount = investment.returned3\n elif investAmount == 4:\n returnAmount = investment.returned4\n elif investAmount == 5:\n returnAmount = investment.returned5\n # otherPlayer.otherreturned = returnAmount\n investment.otherinvested = investAmount\n\n # otherPlayer.points = 5 - investAmount + returnAmount\n investment.points = 5 + (3 * investAmount) - returnAmount\n\n investment.otheruser = otherPlayer.user\n # otherPlayer.otheruser = user\n investment.save()\n # otherPlayer.save()\n\n return JsonResponse({ 'InvestOrReturn':InvestOrReturn, \n 'found': 1, 'returnAmount':returnAmount, \n 'investAmount':investAmount, 'points':investment.points })\n else:\n if investment.otherreturned != -1:\n return JsonResponse({ 'InvestOrReturn':True, \n 'found': 1, 'returnAmount':investment.otherreturned, \n 'investAmount':investment.invested, 'points':investment.points })\n elif investment.otherinvested != -1:\n investAmount = investment.otherinvested\n if investAmount == 0:\n returnAmount = investment.returned0\n elif investAmount == 1:\n returnAmount = investment.returned1\n elif investAmount == 2:\n returnAmount = investment.returned2\n elif investAmount == 3:\n returnAmount = investment.returned3\n elif investAmount == 4:\n returnAmount = investment.returned4\n elif investAmount == 5:\n returnAmount = investment.returned5\n return JsonResponse({ 'InvestOrReturn':False, 'found': 1, \n 'returnAmount':returnAmount, \n 'investAmount':investAmount, 'points':investment.points })\n context = { 'umid': umid, 'gameNum':gameNum }\n return render(request, 'games/Trust Game.html', context)\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\n@ensure_csrf_cookie\ndef thankyou(request):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = 
request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n user = User.objects.get(username=umid)\n holtLauryEarning = round(user.holtlaury_set.all()[0].points,2)\n gambleEarning = round(user.gamble_set.all()[0].points,2)\n investmentEarning = round(user.investment_set.all()[0].points,2)\n experimentEarning = round(holtLauryEarning + gambleEarning + investmentEarning,2)\n totalEarning = round(holtLauryEarning + gambleEarning + investmentEarning + 5,2)\n user.totalearning = round(totalEarning,2)\n user.experimentearning = round(experimentEarning,2)\n try:\n user.startedstudy = datetime.datetime.strptime(request.session['startedStudy'], '%b %d %Y %I:%M:%S %p')\n except:\n try:\n if user.pretest_set.count() != 0:\n pretest = user.pretest_set.all()[0]\n user.startedstudy = pretest.startedquestion1\n print(\"\\n\\n\\nTook started from the pretest.\")\n else:\n user.startedstudy = datetime.datetime.now()\n print(\"\\n\\n\\nTook started from now.\")\n except:\n user.startedstudy = datetime.datetime.now()\n print(\"\\n\\n\\nTook started from now in the except.\")\n user.finishedstudy = datetime.datetime.now()\n user.optout = False\n user.postpone = False\n user.save()\n context = { 'umid': umid, 'holtLauryEarning': \"%.2f\" % holtLauryEarning, 'gambleEarning': gambleEarning, \n 'investmentEarning': \"%.2f\" % investmentEarning, 'experimentEarning': \"%.2f\" % experimentEarning, \n 'totalEarning': \"%.2f\" % totalEarning }\n return render(request, 'games/Thankyou.html', context)\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef thankyousubmit(request):\n if request.method == 'POST':\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('fullnameInput' in request.POST and \n 'streetInput' in request.POST and 'cityInput' in request.POST and \n 'stateInput' in request.POST and 'zipcodeInput' in request.POST):\n fullnameInput = request.POST['fullnameInput']\n streetInput = request.POST['streetInput']\n cityInput = request.POST['cityInput']\n stateInput = request.POST['stateInput']\n zipcodeInput = request.POST['zipcodeInput']\n\n user = User.objects.get(username=umid)\n user.fullname = fullnameInput\n user.street = streetInput\n user.city = cityInput\n user.state = stateInput\n user.zipcode = zipcodeInput\n user.save()\n\n return JsonResponse({ })\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\n@ensure_csrf_cookie\ndef survey(request):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n context = { 'umid': umid }\n return render(request, 'games/Survey.html', context)\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef convertNum2Bool(numObj):\n if numObj == 1 or numObj == \"1\" or numObj 
== \"true\" or numObj == \"True\":\n return True\n elif numObj == 0 or numObj == \"0\" or numObj == \"false\" or numObj == \"False\":\n return False\n\ndef surveysubmit(request):\n if request.method == 'POST':\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if ('emailsperday' in request.POST and \n 'PCLaptop' in request.POST and 'smartphone' in request.POST and \n 'PAD' in request.POST and 'otherDevices' in request.POST and \n 'yearsOfInternet' in request.POST and 'otherDeviceText' in request.POST):\n emailsperday = request.POST['emailsperday']\n PCLaptop = request.POST['PCLaptop']\n smartphone = request.POST['smartphone']\n PAD = request.POST['PAD']\n otherDevices = request.POST['otherDevices']\n otherDeviceText = request.POST['otherDeviceText']\n yearsOfInternet = request.POST['yearsOfInternet']\n\n user = User.objects.get(username=umid)\n user.emailsperday = emailsperday\n user.ownpc = convertNum2Bool(PCLaptop)\n user.ownsmartphone = convertNum2Bool(smartphone)\n user.ownpda = convertNum2Bool(PAD)\n user.ownotherdevice = convertNum2Bool(otherDevices)\n user.otherdevice = otherDeviceText\n user.internetuse = yearsOfInternet\n user.save()\n\n return JsonResponse({ })\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef postpone(request):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n user = User.objects.get(username=umid)\n user.optout = False\n user.postpone = True\n user.save()\n context = { 'umid': umid, 'welcomepage': 1 }\n return render(request, 'games/Postpone.html', context)\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef optout(request):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n user = User.objects.get(username=umid)\n user.optout = True\n user.postpone = False\n user.save()\n context = { 'umid': umid, 'welcomepage': 1 }\n return render(request, 'games/NotInterested.html', context)\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef mean(data):\n \"\"\"Return the sample arithmetic mean of data.\"\"\"\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/n # in Python 2 use sum(data)/float(n)\n\ndef _ss(data):\n \"\"\"Return sum of square deviations of sequence data.\"\"\"\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss\n\ndef pstdev(data):\n \"\"\"Calculates the population standard deviation.\"\"\"\n n = len(data)\n if n < 2:\n raise 
ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/n # the population variance\n return pvar**0.5\n\ndef median(lst):\n sortedLst = sorted(lst)\n lstLen = len(lst)\n index = (lstLen - 1) // 2\n\n if (lstLen % 2):\n return sortedLst[index]\n else:\n return (sortedLst[index] + sortedLst[index + 1])/2.0\n\ndef findStatistics(dataObj):\n dataObjMean = round(mean(dataObj),3) if len(dataObj) > 0 else float('nan')\n dataObjMin = round(min(dataObj),3) if len(dataObj) > 0 else float('nan')\n dataObjMax = round(max(dataObj),3) if len(dataObj) > 0 else float('nan')\n dataObjStdev = round(pstdev(dataObj),3) if len(dataObj) > 1 else float('nan')\n dataObjMedian = round(median(dataObj),3) if len(dataObj) > 0 else float('nan')\n dataObjMode = round(max(set(dataObj), key=dataObj.count),3) if len(dataObj) > 0 else float('nan')\n\n return (dataObjMean, dataObjMin, dataObjMax, dataObjStdev, dataObjMedian, dataObjMode)\n\nadminsUniquenames = [\"yanchen\", \"oneweb\", \"arkzhang\"]\n\ndef results(request):\n # try:\n if request.method == 'GET':\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if (umid in adminsUniquenames):\n\n version = \"Pilot\"\n page = \"Overview\"\n if ('version' in request.GET and request.GET['version'] != '' and request.GET['version'] != 'undefined'):\n version = request.GET['version']\n if ('page' in request.GET and request.GET['page'] != '' and request.GET['page'] != 'undefined'):\n page = request.GET['page']\n\n allUsers = User.objects.filter(version=version)\n allUsers = list(allUsers)\n\n questionCorrects = [[] for i in range(8)]\n questionHovers = [[] for i in range(8)]\n usersCorrects = []\n usersHovers = []\n usersDurations = []\n totalCorrects = []\n totalHovers = []\n corrects = [0]*8\n clicks = [0]*8\n rightClicks = [0]*8\n questionhovereds = [0]*8\n hoveredsseconds = [[] for i in range(8)]\n secondsDurations = [[] for i in range(8)]\n\n userIndex = 0\n allUsersLen = len(allUsers)\n while userIndex < allUsersLen:\n user = allUsers[userIndex]\n pretest = user.pretest_set.all()\n if len(pretest) > 0:\n pretest = pretest[0]\n usersCorrects.append(0)\n usersHovers.append(0)\n usersDurations.append(0)\n userCorrect = [0]*8\n userCorrect[1] = pretest.correct1\n if userCorrect[1] == 2:\n userCorrect[1] = 0\n userCorrect[2] = pretest.correct2\n if userCorrect[2] == 2:\n userCorrect[2] = 0\n userCorrect[3] = pretest.correct3\n if userCorrect[3] == 2:\n userCorrect[3] = 0\n userCorrect[4] = pretest.correct4\n if userCorrect[4] == 2:\n userCorrect[4] = 0\n userCorrect[5] = pretest.correct5\n if userCorrect[5] == 2:\n userCorrect[5] = 0\n userCorrect[6] = pretest.correct6\n if userCorrect[6] == 2:\n userCorrect[6] = 0\n userCorrect[7] = pretest.correct7\n if userCorrect[7] == 2:\n userCorrect[7] = 0\n for index in range(1, 8):\n corrects[index] += userCorrect[index]\n usersCorrects[userIndex] += userCorrect[index]\n questionCorrects[index].append(userCorrect[index])\n totalCorrects.append(userCorrect[index])\n rightClicks[1] += pretest.questionrightclicked1\n rightClicks[2] += pretest.questionrightclicked2\n rightClicks[3] += pretest.questionrightclicked3\n rightClicks[4] += pretest.questionrightclicked4\n rightClicks[5] 
+= pretest.questionrightclicked5\n rightClicks[6] += pretest.questionrightclicked6\n rightClicks[7] += pretest.questionrightclicked7\n questionhovereds[1] += pretest.questionhovered1\n questionhovereds[2] += pretest.questionhovered2\n questionhovereds[3] += pretest.questionhovered3\n questionhovereds[4] += pretest.questionhovered4\n questionhovereds[5] += pretest.questionhovered5\n questionhovereds[6] += pretest.questionhovered6\n questionhovereds[7] += pretest.questionhovered7\n\n hoveredsseconds[1].append(pretest.questionhoveredseconds1)\n questionHovers[1].append(pretest.questionhoveredseconds1)\n usersHovers[userIndex] += pretest.questionhoveredseconds1\n totalHovers.append(pretest.questionhoveredseconds1)\n\n hoveredsseconds[2].append(pretest.questionhoveredseconds2)\n questionHovers[2].append(pretest.questionhoveredseconds2)\n usersHovers[userIndex] += pretest.questionhoveredseconds2\n totalHovers.append(pretest.questionhoveredseconds2)\n\n hoveredsseconds[3].append(pretest.questionhoveredseconds3)\n questionHovers[3].append(pretest.questionhoveredseconds3)\n usersHovers[userIndex] += pretest.questionhoveredseconds3\n totalHovers.append(pretest.questionhoveredseconds3)\n\n hoveredsseconds[4].append(pretest.questionhoveredseconds4)\n questionHovers[4].append(pretest.questionhoveredseconds4)\n usersHovers[userIndex] += pretest.questionhoveredseconds4\n totalHovers.append(pretest.questionhoveredseconds4)\n\n hoveredsseconds[5].append(pretest.questionhoveredseconds5)\n questionHovers[5].append(pretest.questionhoveredseconds5)\n usersHovers[userIndex] += pretest.questionhoveredseconds5\n totalHovers.append(pretest.questionhoveredseconds5)\n\n hoveredsseconds[6].append(pretest.questionhoveredseconds6)\n questionHovers[6].append(pretest.questionhoveredseconds6)\n usersHovers[userIndex] += pretest.questionhoveredseconds6\n totalHovers.append(pretest.questionhoveredseconds6)\n\n hoveredsseconds[7].append(pretest.questionhoveredseconds7)\n questionHovers[7].append(pretest.questionhoveredseconds7)\n usersHovers[userIndex] += pretest.questionhoveredseconds7\n totalHovers.append(pretest.questionhoveredseconds7)\n\n secondsDuration = [0]*8\n secondsDuration[1] = pretest.finishedquestion1 - pretest.startedquestion1\n secondsDuration[2] = pretest.finishedquestion2 - pretest.startedquestion2\n secondsDuration[3] = pretest.finishedquestion3 - pretest.startedquestion3\n secondsDuration[4] = pretest.finishedquestion4 - pretest.startedquestion4\n secondsDuration[5] = pretest.finishedquestion5 - pretest.startedquestion5\n secondsDuration[6] = pretest.finishedquestion6 - pretest.startedquestion6\n secondsDuration[7] = pretest.finishedquestion7 - pretest.startedquestion7\n for index in range(1, 8):\n secondsDuration[index] = secondsDuration[index].total_seconds()\n secondsDurations[index].append(secondsDuration[index])\n usersDurations[userIndex] += secondsDuration[index]\n\n userIndex += 1\n else:\n del allUsers[userIndex]\n allUsersLen -= 1\n\n usersCorrects = array(usersCorrects)\n usersHovers = array(usersHovers)\n usersDurations = array(usersDurations)\n\n if page == \"Overview\":\n experimentEarnings = []\n minutesDurations = []\n\n experimentEarningsTotal = 0\n\n for user in allUsers:\n experimentEarningsTotal += user.experimentearning\n experimentEarnings.append(user.experimentearning)\n timedeltaDuration = user.finishedstudy - user.startedstudy\n secondsDuration = timedeltaDuration.total_seconds()\n minutesDuration = secondsDuration // 60\n minutesDurations.append(minutesDuration)\n\n 
(experimentEarningsMean, experimentEarningsMin, experimentEarningsMax, \n experimentEarningsStdev, experimentEarningsMedian, \n experimentEarningsMode) = findStatistics(experimentEarnings)\n\n (minutesDurationsMean, minutesDurationsMin, minutesDurationsMax, \n minutesDurationsStdev, minutesDurationsMedian, \n minutesDurationsMode) = findStatistics(minutesDurations)\n\n context = { 'umid': umid, 'allUsers': allUsers, 'welcomepage': 1, 'page': page, 'version': version, \n 'experimentEarningsMean': experimentEarningsMean, 'experimentEarningsTotal': round(experimentEarningsTotal,3), \n 'experimentEarningsMin': experimentEarningsMin, 'experimentEarningsMax': experimentEarningsMax, \n 'experimentEarningsStdev': experimentEarningsStdev, 'experimentEarningsMedian': experimentEarningsMedian, \n 'experimentEarningsMode': experimentEarningsMode, \n 'minutesDurations': minutesDurations, 'minutesDurationsMean': minutesDurationsMean, \n 'minutesDurationsMin': minutesDurationsMin, 'minutesDurationsMax': minutesDurationsMax, \n 'minutesDurationsStdev': minutesDurationsStdev, 'minutesDurationsMedian': minutesDurationsMedian, \n 'minutesDurationsMode': minutesDurationsMode }\n\n elif page == \"Pretest\":\n hoveredssecondsMean = [0]*8\n hoveredssecondsMin = [0]*8\n hoveredssecondsMax = [0]*8\n hoveredssecondsStdev = [0]*8\n hoveredssecondsMedian = [0]*8\n hoveredssecondsMode = [0]*8\n for index in range(1, 8):\n (hoveredssecondsMean[index], hoveredssecondsMin[index], hoveredssecondsMax[index], \n hoveredssecondsStdev[index], hoveredssecondsMedian[index], \n hoveredssecondsMode[index]) = findStatistics(hoveredsseconds[index])\n\n secondsDurationsMean = [0]*8\n secondsDurationsMin = [0]*8\n secondsDurationsMax = [0]*8\n secondsDurationsStdev = [0]*8\n secondsDurationsMedian = [0]*8\n secondsDurationsMode = [0]*8\n for index in range(1, 8):\n (secondsDurationsMean[index], secondsDurationsMin[index], secondsDurationsMax[index], \n secondsDurationsStdev[index], secondsDurationsMedian[index], \n secondsDurationsMode[index]) = findStatistics(secondsDurations[index])\n\n usersHoversSlope, usersHoversIntercept, usersHoversR_value, usersHoversP_value, usersHoversStd_err = stats.linregress(usersHovers,usersCorrects[0:len(usersHovers)])\n\n usersHoversSlope = round(usersHoversSlope,3)\n usersHoversIntercept = round(usersHoversIntercept,3)\n usersHoversR_value = round(usersHoversR_value,3)\n usersHoversP_value = round(usersHoversP_value,3)\n usersHoversStd_err = round(usersHoversStd_err,3)\n\n usersDurationsSlope, usersDurationsIntercept, usersDurationsR_value, usersDurationsP_value, usersDurationsStd_err = stats.linregress(usersDurations,usersCorrects[0:len(usersDurations)])\n\n usersDurationsSlope = round(usersDurationsSlope,3)\n usersDurationsIntercept = round(usersDurationsIntercept,3)\n usersDurationsR_value = round(usersDurationsR_value,3)\n usersDurationsP_value = round(usersDurationsP_value,3)\n usersDurationsStd_err = round(usersDurationsStd_err,3)\n\n context = { 'umid': umid, 'allUsers': allUsers, 'welcomepage': 1, 'page': page, 'version': version, \n 'corrects': corrects, 'usersCorrects': usersCorrects, \n 'clicks': clicks, \n 'rightClicks': rightClicks, \n 'questionhovereds': questionhovereds, \n 'hoveredssecondsMean': hoveredssecondsMean, \n 'hoveredssecondsMin': hoveredssecondsMin, 'hoveredssecondsMax': hoveredssecondsMax, \n 'hoveredssecondsStdev': hoveredssecondsStdev, 'hoveredssecondsMedian': hoveredssecondsMedian, \n 'hoveredssecondsMode': hoveredssecondsMode, \n 'usersHovers': 
usersHovers, 'usersHoversSlope': usersHoversSlope, \n 'usersHoversIntercept':usersHoversIntercept, 'usersHoversR_value': usersHoversR_value, \n 'usersHoversP_value': usersHoversP_value, 'usersHoversStd_err': usersHoversStd_err,\n 'usersDurations': usersDurations, 'usersDurationsSlope': usersDurationsSlope, \n 'usersDurationsIntercept':usersDurationsIntercept, 'usersDurationsR_value': usersDurationsR_value, \n 'usersDurationsP_value': usersDurationsP_value, 'usersDurationsStd_err': usersDurationsStd_err,\n 'secondsDurations': secondsDurations, 'secondsDurationsMean': secondsDurationsMean, \n 'secondsDurationsMin': secondsDurationsMin, 'secondsDurationsMax': secondsDurationsMax, \n 'secondsDurationsStdev': secondsDurationsStdev, 'secondsDurationsMedian': secondsDurationsMedian, \n 'secondsDurationsMode': secondsDurationsMode, \n }\n\n elif page == \"Lottery\":\n options = [0]*11\n totalCorrectsPerOption = [0]*11\n numOfCorrectsPerOption = [0]*11\n totalPoints = 0\n totalOriginalPoints = 0\n totalWillingness = 0\n points = []\n originalPoints = []\n willingness = []\n secondsDurations = []\n usersRiskAversion = []\n usersWillingness = []\n usersLotteryDurations = []\n\n userIndex = 0\n allUsersLen = len(allUsers)\n while userIndex < allUsersLen:\n user = allUsers[userIndex]\n holtlaury = user.holtlaury_set.all()\n if len(holtlaury) > 0:\n holtlaury = holtlaury[0]\n usersRiskAversion.append(0)\n usersWillingness.append(0)\n usersLotteryDurations.append(0)\n option = [0]*11\n option[1] = int(holtlaury.option1)\n option[2] = int(holtlaury.option2)\n option[3] = int(holtlaury.option3)\n option[4] = int(holtlaury.option4)\n option[5] = int(holtlaury.option5)\n option[6] = int(holtlaury.option6)\n option[7] = int(holtlaury.option7)\n option[8] = int(holtlaury.option8)\n option[9] = int(holtlaury.option9)\n option[10] = int(holtlaury.option10)\n rationalUser = True\n if option[10] == 0:\n del allUsers[userIndex]\n usersCorrects = numpy.delete(usersCorrects, [userIndex])\n del usersRiskAversion[userIndex]\n del usersWillingness[userIndex]\n del usersLotteryDurations[userIndex]\n rationalUser = False\n allUsersLen -= 1\n continue\n for index in range(1, 11):\n if option[index] == 1 and usersRiskAversion[userIndex] == 0:\n usersRiskAversion[userIndex] = index\n elif option[index] == 0 and usersRiskAversion[userIndex] != 0:\n del allUsers[userIndex]\n usersCorrects = numpy.delete(usersCorrects, [userIndex])\n del usersRiskAversion[userIndex]\n del usersWillingness[userIndex]\n del usersLotteryDurations[userIndex]\n rationalUser = False\n allUsersLen -= 1\n break\n if rationalUser:\n for index in range(1, 11):\n options[index] += option[index]\n if option[index] != 0:\n totalCorrectsPerOption[index] += usersCorrects[userIndex]\n numOfCorrectsPerOption[index] += 1\n points.append(holtlaury.points)\n totalPoints += holtlaury.points\n originalPoints.append(holtlaury.originalPoints)\n totalOriginalPoints += holtlaury.originalPoints\n willingness.append(holtlaury.willingness)\n usersWillingness[userIndex] = holtlaury.willingness\n totalWillingness += holtlaury.willingness\n\n secondsDuration = holtlaury.finished - holtlaury.started\n secondsDuration = secondsDuration.total_seconds()\n secondsDurations.append(secondsDuration)\n usersLotteryDurations[userIndex] = secondsDuration\n\n userIndex += 1\n else:\n del allUsers[userIndex]\n usersCorrects = numpy.delete(usersCorrects, [userIndex])\n allUsersLen -= 1\n\n (pointsMean, pointsMin, pointsMax, \n pointsStdev, pointsMedian, \n pointsMode) = 
findStatistics(points)\n (originalPointsMean, originalPointsMin, originalPointsMax, \n originalPointsStdev, originalPointsMedian, \n originalPointsMode) = findStatistics(originalPoints)\n (willingnessMean, willingnessMin, willingnessMax, \n willingnessStdev, willingnessMedian, \n willingnessMode) = findStatistics(willingness)\n\n (secondsDurationsMean, secondsDurationsMin, secondsDurationsMax, \n secondsDurationsStdev, secondsDurationsMedian, \n secondsDurationsMode) = findStatistics(secondsDurations)\n\n usersRiskAversionSlope, usersRiskAversionIntercept, usersRiskAversionR_value, usersRiskAversionP_value, usersRiskAversionStd_err = stats.linregress(usersRiskAversion,usersCorrects)\n\n usersRiskAversionSlope = round(usersRiskAversionSlope,3)\n usersRiskAversionIntercept = round(usersRiskAversionIntercept,3)\n usersRiskAversionR_value = round(usersRiskAversionR_value,3)\n usersRiskAversionP_value = round(usersRiskAversionP_value,3)\n usersRiskAversionStd_err = round(usersRiskAversionStd_err,3)\n\n usersWillingnessSlope, usersWillingnessIntercept, usersWillingnessR_value, usersWillingnessP_value, usersWillingnessStd_err = stats.linregress(usersWillingness,usersCorrects)\n\n usersWillingnessSlope = round(usersWillingnessSlope,3)\n usersWillingnessIntercept = round(usersWillingnessIntercept,3)\n usersWillingnessR_value = round(usersWillingnessR_value,3)\n usersWillingnessP_value = round(usersWillingnessP_value,3)\n usersWillingnessStd_err = round(usersWillingnessStd_err,3)\n\n usersLotteryDurationsSlope, usersLotteryDurationsIntercept, usersLotteryDurationsR_value, usersLotteryDurationsP_value, usersLotteryDurationsStd_err = stats.linregress(usersLotteryDurations,usersCorrects)\n\n usersLotteryDurationsSlope = round(usersLotteryDurationsSlope,3)\n usersLotteryDurationsIntercept = round(usersLotteryDurationsIntercept,3)\n usersLotteryDurationsR_value = round(usersLotteryDurationsR_value,3)\n usersLotteryDurationsP_value = round(usersLotteryDurationsP_value,3)\n usersLotteryDurationsStd_err = round(usersLotteryDurationsStd_err,3)\n\n usersLotteryDurationsRiskAversionSlope, usersLotteryDurationsRiskAversionIntercept, usersLotteryDurationsRiskAversionR_value, usersLotteryDurationsRiskAversionP_value, usersLotteryDurationsRiskAversionStd_err = stats.linregress(usersLotteryDurations,usersRiskAversion)\n\n usersLotteryDurationsRiskAversionSlope = round(usersLotteryDurationsRiskAversionSlope,3)\n usersLotteryDurationsRiskAversionIntercept = round(usersLotteryDurationsRiskAversionIntercept,3)\n usersLotteryDurationsRiskAversionR_value = round(usersLotteryDurationsRiskAversionR_value,3)\n usersLotteryDurationsRiskAversionP_value = round(usersLotteryDurationsRiskAversionP_value,3)\n usersLotteryDurationsRiskAversionStd_err = round(usersLotteryDurationsRiskAversionStd_err,3)\n\n context = { 'umid': umid, 'allUsers': allUsers, 'welcomepage': 1, 'page': page, 'version': version, \n 'options': options, 'totalPoints': round(totalPoints,3), 'points': points, 'usersCorrects': usersCorrects, \n 'totalCorrectsPerOption': totalCorrectsPerOption, 'numOfCorrectsPerOption': numOfCorrectsPerOption, 'pointsMean': pointsMean, \n 'pointsMin': pointsMin, 'pointsMax': pointsMax, \n 'pointsStdev': pointsStdev, 'pointsMedian': pointsMedian, \n 'pointsMode': pointsMode, \n 'originalPoints': originalPoints, 'totalOriginalPoints': round(totalOriginalPoints,3), 'originalPointsMean': originalPointsMean, \n 'originalPointsMin': originalPointsMin, 'originalPointsMax': originalPointsMax, \n 'originalPointsStdev': 
originalPointsStdev, 'originalPointsMedian': originalPointsMedian, \n 'originalPointsMode': originalPointsMode, \n 'willingness': willingness, 'totalWillingness': round(totalWillingness,3), 'willingnessMean': willingnessMean, \n 'willingnessMin': willingnessMin, 'willingnessMax': willingnessMax, \n 'willingnessStdev': willingnessStdev, 'willingnessMedian': willingnessMedian, \n 'willingnessMode': willingnessMode, \n 'secondsDurations': secondsDurations, 'secondsDurationsMean': secondsDurationsMean, \n 'secondsDurationsMin': secondsDurationsMin, 'secondsDurationsMax': secondsDurationsMax, \n 'secondsDurationsStdev': secondsDurationsStdev, 'secondsDurationsMedian': secondsDurationsMedian, \n 'secondsDurationsMode': secondsDurationsMode, \n 'usersRiskAversion': usersRiskAversion, 'usersRiskAversionSlope': usersRiskAversionSlope, \n 'usersRiskAversionIntercept':usersRiskAversionIntercept, 'usersRiskAversionR_value': usersRiskAversionR_value, \n 'usersRiskAversionP_value': usersRiskAversionP_value, 'usersRiskAversionStd_err': usersRiskAversionStd_err,\n 'usersWillingness': usersWillingness, 'usersWillingnessSlope': usersWillingnessSlope, \n 'usersWillingnessIntercept':usersWillingnessIntercept, 'usersWillingnessR_value': usersWillingnessR_value, \n 'usersWillingnessP_value': usersWillingnessP_value, 'usersWillingnessStd_err': usersWillingnessStd_err,\n 'usersLotteryDurations': usersLotteryDurations, 'usersLotteryDurationsSlope': usersLotteryDurationsSlope, \n 'usersLotteryDurationsIntercept':usersLotteryDurationsIntercept, 'usersLotteryDurationsR_value': usersLotteryDurationsR_value, \n 'usersLotteryDurationsP_value': usersLotteryDurationsP_value, 'usersLotteryDurationsStd_err': usersLotteryDurationsStd_err,\n 'usersLotteryDurationsRiskAversionSlope': usersLotteryDurationsRiskAversionSlope, \n 'usersLotteryDurationsRiskAversionIntercept':usersLotteryDurationsRiskAversionIntercept, 'usersLotteryDurationsRiskAversionR_value': usersLotteryDurationsRiskAversionR_value, \n 'usersLotteryDurationsRiskAversionP_value': usersLotteryDurationsRiskAversionP_value, 'usersLotteryDurationsRiskAversionStd_err': usersLotteryDurationsRiskAversionStd_err,\n }\n\n elif page == \"Gamble\":\n usersRiskAversion = []\n totalCorrectsPerOption = [0]*11\n numOfCorrectsPerOption = [0]*11\n\n chosens = []\n points = []\n secondsDurations = []\n totalPoints = 0\n\n userIndex = 0\n allUsersLen = len(allUsers)\n while userIndex < allUsersLen:\n user = allUsers[userIndex]\n holtlaury = user.holtlaury_set.all()\n if len(holtlaury) > 0:\n holtlaury = holtlaury[0]\n usersRiskAversion.append(0)\n option = [0]*11\n option[1] = int(holtlaury.option1)\n option[2] = int(holtlaury.option2)\n option[3] = int(holtlaury.option3)\n option[4] = int(holtlaury.option4)\n option[5] = int(holtlaury.option5)\n option[6] = int(holtlaury.option6)\n option[7] = int(holtlaury.option7)\n option[8] = int(holtlaury.option8)\n option[9] = int(holtlaury.option9)\n option[10] = int(holtlaury.option10)\n rationalUser = True\n if option[10] == 0:\n del allUsers[userIndex]\n usersCorrects = numpy.delete(usersCorrects, [userIndex])\n del usersRiskAversion[userIndex]\n rationalUser = False\n allUsersLen -= 1\n continue\n for index in range(1, 11):\n if option[index] == 1 and usersRiskAversion[userIndex] == 0:\n usersRiskAversion[userIndex] = index\n elif option[index] == 0 and usersRiskAversion[userIndex] != 0:\n del allUsers[userIndex]\n usersCorrects = numpy.delete(usersCorrects, [userIndex])\n del usersRiskAversion[userIndex]\n rationalUser = False\n 
allUsersLen -= 1\n break\n if rationalUser:\n gamble = user.gamble_set.all()\n if len(gamble) > 0:\n gamble = gamble[0]\n chosens.append(gamble.chosen)\n totalCorrectsPerOption[gamble.chosen] += usersCorrects[userIndex]\n numOfCorrectsPerOption[gamble.chosen] += 1\n points.append(gamble.points)\n totalPoints += gamble.points\n\n secondsDuration = gamble.finished - gamble.started\n secondsDuration = secondsDuration.total_seconds()\n secondsDurations.append(secondsDuration)\n\n userIndex += 1\n else:\n del allUsers[userIndex]\n usersCorrects = numpy.delete(usersCorrects, [userIndex])\n allUsersLen -= 1\n\n (pointsMean, pointsMin, pointsMax, \n pointsStdev, pointsMedian, \n pointsMode) = findStatistics(points)\n\n (chosensMean, chosensMin, chosensMax, \n chosensStdev, chosensMedian, \n chosensMode) = findStatistics(chosens)\n\n (secondsDurationsMean, secondsDurationsMin, secondsDurationsMax, \n secondsDurationsStdev, secondsDurationsMedian, \n secondsDurationsMode) = findStatistics(secondsDurations)\n\n usersRiskSeekingSlope, usersRiskSeekingIntercept, usersRiskSeekingR_value, usersRiskSeekingP_value, usersRiskSeekingStd_err = stats.linregress(chosens,usersCorrects[0:len(chosens)])\n\n usersRiskSeekingSlope = round(usersRiskSeekingSlope,3)\n usersRiskSeekingIntercept = round(usersRiskSeekingIntercept,3)\n usersRiskSeekingR_value = round(usersRiskSeekingR_value,3)\n usersRiskSeekingP_value = round(usersRiskSeekingP_value,3)\n usersRiskSeekingStd_err = round(usersRiskSeekingStd_err,3)\n\n usersRiskSeekingRiskAversionSlope, usersRiskSeekingRiskAversionIntercept, usersRiskSeekingRiskAversionR_value, usersRiskSeekingRiskAversionP_value, usersRiskSeekingRiskAversionStd_err = stats.linregress(chosens,usersRiskAversion[0:len(chosens)])\n\n usersRiskSeekingRiskAversionSlope = round(usersRiskSeekingRiskAversionSlope,3)\n usersRiskSeekingRiskAversionIntercept = round(usersRiskSeekingRiskAversionIntercept,3)\n usersRiskSeekingRiskAversionR_value = round(usersRiskSeekingRiskAversionR_value,3)\n usersRiskSeekingRiskAversionP_value = round(usersRiskSeekingRiskAversionP_value,3)\n usersRiskSeekingRiskAversionStd_err = round(usersRiskSeekingRiskAversionStd_err,3)\n\n usersGambleDurationsSlope, usersGambleDurationsIntercept, usersGambleDurationsR_value, usersGambleDurationsP_value, usersGambleDurationsStd_err = stats.linregress(secondsDurations,usersCorrects[0:len(secondsDurations)])\n\n usersGambleDurationsSlope = round(usersGambleDurationsSlope,3)\n usersGambleDurationsIntercept = round(usersGambleDurationsIntercept,3)\n usersGambleDurationsR_value = round(usersGambleDurationsR_value,3)\n usersGambleDurationsP_value = round(usersGambleDurationsP_value,3)\n usersGambleDurationsStd_err = round(usersGambleDurationsStd_err,3)\n\n usersGambleDurationsRiskSeekingSlope, usersGambleDurationsRiskSeekingIntercept, usersGambleDurationsRiskSeekingR_value, usersGambleDurationsRiskSeekingP_value, usersGambleDurationsRiskSeekingStd_err = stats.linregress(secondsDurations,chosens[0:len(secondsDurations)])\n\n usersGambleDurationsRiskSeekingSlope = round(usersGambleDurationsRiskSeekingSlope,3)\n usersGambleDurationsRiskSeekingIntercept = round(usersGambleDurationsRiskSeekingIntercept,3)\n usersGambleDurationsRiskSeekingR_value = round(usersGambleDurationsRiskSeekingR_value,3)\n usersGambleDurationsRiskSeekingP_value = round(usersGambleDurationsRiskSeekingP_value,3)\n usersGambleDurationsRiskSeekingStd_err = round(usersGambleDurationsRiskSeekingStd_err,3)\n\n context = { 'umid': umid, 'allUsers': allUsers, 'welcomepage': 1, 
'page': page, 'version': version, \n 'totalPoints': round(totalPoints,3), 'points': points, 'usersCorrects': usersCorrects, \n 'totalCorrectsPerOption': totalCorrectsPerOption, 'numOfCorrectsPerOption': numOfCorrectsPerOption, 'pointsMean': pointsMean, \n 'pointsMin': pointsMin, 'pointsMax': pointsMax, \n 'pointsStdev': pointsStdev, 'pointsMedian': pointsMedian, \n 'pointsMode': pointsMode, \n 'chosens': chosens, 'chosensMean': chosensMean, \n 'chosensMin': chosensMin, 'chosensMax': chosensMax, \n 'chosensStdev': chosensStdev, 'chosensMedian': chosensMedian, \n 'chosensMode': chosensMode, \n 'secondsDurations': secondsDurations, 'secondsDurationsMean': secondsDurationsMean, \n 'secondsDurationsMin': secondsDurationsMin, 'secondsDurationsMax': secondsDurationsMax, \n 'secondsDurationsStdev': secondsDurationsStdev, 'secondsDurationsMedian': secondsDurationsMedian, \n 'secondsDurationsMode': secondsDurationsMode, \n 'usersRiskSeekingSlope': usersRiskSeekingSlope, \n 'usersRiskSeekingIntercept':usersRiskSeekingIntercept, 'usersRiskSeekingR_value': usersRiskSeekingR_value, \n 'usersRiskSeekingP_value': usersRiskSeekingP_value, 'usersRiskSeekingStd_err': usersRiskSeekingStd_err,\n 'usersRiskAversion':usersRiskAversion, 'usersRiskSeekingRiskAversionSlope': usersRiskSeekingRiskAversionSlope, \n 'usersRiskSeekingRiskAversionIntercept':usersRiskSeekingRiskAversionIntercept, 'usersRiskSeekingRiskAversionR_value': usersRiskSeekingRiskAversionR_value, \n 'usersRiskSeekingRiskAversionP_value': usersRiskSeekingRiskAversionP_value, 'usersRiskSeekingRiskAversionStd_err': usersRiskSeekingRiskAversionStd_err,\n 'usersGambleDurationsSlope': usersGambleDurationsSlope, \n 'usersGambleDurationsIntercept':usersGambleDurationsIntercept, 'usersGambleDurationsR_value': usersGambleDurationsR_value, \n 'usersGambleDurationsP_value': usersGambleDurationsP_value, 'usersGambleDurationsStd_err': usersGambleDurationsStd_err,\n 'usersGambleDurationsRiskSeekingSlope': usersGambleDurationsRiskSeekingSlope, \n 'usersGambleDurationsRiskSeekingIntercept':usersGambleDurationsRiskSeekingIntercept, 'usersGambleDurationsRiskSeekingR_value': usersGambleDurationsRiskSeekingR_value, \n 'usersGambleDurationsRiskSeekingP_value': usersGambleDurationsRiskSeekingP_value, 'usersGambleDurationsRiskSeekingStd_err': usersGambleDurationsRiskSeekingStd_err,\n }\n\n elif page == \"Investment\":\n invested = []\n returned = [[] for i in range(6)]\n points = []\n totalPoints = 0\n secondsInvestedDurations = []\n secondsReturnedDurations = [[] for i in range(6)]\n\n for user in allUsers:\n investment = user.investment_set.all()\n if len(investment) > 0:\n investment = investment[0]\n invested.append(investment.invested)\n returned[0].append(investment.returned0)\n returned[1].append(investment.returned1)\n returned[2].append(investment.returned2)\n returned[3].append(investment.returned3)\n returned[4].append(investment.returned4)\n returned[5].append(investment.returned5)\n points.append(investment.points)\n totalPoints += investment.points\n\n secondsInvestedDuration = investment.finishedinvested - investment.startedinvested\n secondsInvestedDuration = secondsInvestedDuration.total_seconds()\n secondsInvestedDurations.append(secondsInvestedDuration)\n secondsReturnedDuration = [0]*6\n secondsReturnedDuration[0] = investment.finishedreturned0 - investment.startedreturned0\n secondsReturnedDuration[1] = investment.finishedreturned1 - investment.startedreturned1\n secondsReturnedDuration[2] = investment.finishedreturned2 - 
investment.startedreturned2\n secondsReturnedDuration[3] = investment.finishedreturned3 - investment.startedreturned3\n secondsReturnedDuration[4] = investment.finishedreturned4 - investment.startedreturned4\n secondsReturnedDuration[5] = investment.finishedreturned5 - investment.startedreturned5\n for index in range(0, 6):\n secondsReturnedDuration[index] = secondsReturnedDuration[index].total_seconds()\n secondsReturnedDurations[index].append(secondsReturnedDuration[index])\n\n (investedMean, investedMin, investedMax, \n investedStdev, investedMedian, \n investedMode) = findStatistics(invested)\n\n returnedMean = [0]*6\n returnedMin = [0]*6\n returnedMax = [0]*6\n returnedStdev = [0]*6\n returnedMedian = [0]*6\n returnedMode = [0]*6\n for index in range(0, 6):\n (returnedMean[index], returnedMin[index], returnedMax[index], \n returnedStdev[index], returnedMedian[index], \n returnedMode[index]) = findStatistics(returned[index])\n\n (pointsMean, pointsMin, pointsMax, \n pointsStdev, pointsMedian, \n pointsMode) = findStatistics(points)\n\n (secondsInvestedDurationsMean, secondsInvestedDurationsMin, secondsInvestedDurationsMax, \n secondsInvestedDurationsStdev, secondsInvestedDurationsMedian, \n secondsInvestedDurationsMode) = findStatistics(secondsInvestedDurations)\n\n secondsReturnedDurationsMean = [0]*6\n secondsReturnedDurationsMin = [0]*6\n secondsReturnedDurationsMax = [0]*6\n secondsReturnedDurationsStdev = [0]*6\n secondsReturnedDurationsMedian = [0]*6\n secondsReturnedDurationsMode = [0]*6\n for index in range(0, 6):\n (secondsReturnedDurationsMean[index], secondsReturnedDurationsMin[index], secondsReturnedDurationsMax[index], \n secondsReturnedDurationsStdev[index], secondsReturnedDurationsMedian[index], \n secondsReturnedDurationsMode[index]) = findStatistics(secondsReturnedDurations[index])\n\n usersTrustInvestmentSlope, usersTrustInvestmentIntercept, usersTrustInvestmentR_value, usersTrustInvestmentP_value, usersTrustInvestmentStd_err = stats.linregress(invested, usersCorrects[0:len(invested)])\n\n usersTrustInvestmentSlope = round(usersTrustInvestmentSlope,3)\n usersTrustInvestmentIntercept = round(usersTrustInvestmentIntercept,3)\n usersTrustInvestmentR_value = round(usersTrustInvestmentR_value,3)\n usersTrustInvestmentP_value = round(usersTrustInvestmentP_value,3)\n usersTrustInvestmentStd_err = round(usersTrustInvestmentStd_err,3)\n\n usersInvestmentDurationsSlope, usersInvestmentDurationsIntercept, usersInvestmentDurationsR_value, usersInvestmentDurationsP_value, usersInvestmentDurationsStd_err = stats.linregress(secondsInvestedDurations, invested[0:len(secondsInvestedDurations)])\n\n usersInvestmentDurationsSlope = round(usersInvestmentDurationsSlope,3)\n usersInvestmentDurationsIntercept = round(usersInvestmentDurationsIntercept,3)\n usersInvestmentDurationsR_value = round(usersInvestmentDurationsR_value,3)\n usersInvestmentDurationsP_value = round(usersInvestmentDurationsP_value,3)\n usersInvestmentDurationsStd_err = round(usersInvestmentDurationsStd_err,3)\n\n usersTrustreturnedSlope = [0]*6\n usersTrustreturnedIntercept = [0]*6\n usersTrustreturnedR_value = [0]*6\n usersTrustreturnedP_value = [0]*6\n usersTrustreturnedStd_err = [0]*6\n for index in range(0, 6):\n usersTrustreturnedSlope[index], usersTrustreturnedIntercept[index], usersTrustreturnedR_value[index], usersTrustreturnedP_value[index], usersTrustreturnedStd_err[index] = stats.linregress(returned[index], usersCorrects[0:len(returned[index])])\n\n usersTrustreturnedSlope[index] = 
round(usersTrustreturnedSlope[index],3)\n usersTrustreturnedIntercept[index] = round(usersTrustreturnedIntercept[index],3)\n usersTrustreturnedR_value[index] = round(usersTrustreturnedR_value[index],3)\n usersTrustreturnedP_value[index] = round(usersTrustreturnedP_value[index],3)\n usersTrustreturnedStd_err[index] = round(usersTrustreturnedStd_err[index],3)\n\n context = { 'umid': umid, 'allUsers': allUsers, 'welcomepage': 1, 'page': page, 'version': version, 'usersCorrects': usersCorrects, \n 'invested': invested, 'investedMean': investedMean, \n 'investedMin': investedMin, 'investedMax': investedMax, \n 'investedStdev': investedStdev, 'investedMedian': investedMedian, \n 'investedMode': investedMode, \n 'returned': returned, 'returnedMean': returnedMean, \n 'returnedMin': returnedMin, 'returnedMax': returnedMax, \n 'returnedStdev': returnedStdev, 'returnedMedian': returnedMedian, \n 'returnedMode': returnedMode, \n 'totalPoints': round(totalPoints,3), 'points': points, 'pointsMean': pointsMean, \n 'pointsMin': pointsMin, 'pointsMax': pointsMax, \n 'pointsStdev': pointsStdev, 'pointsMedian': pointsMedian, \n 'pointsMode': pointsMode, \n 'secondsInvestedDurations': secondsInvestedDurations, 'secondsInvestedDurationsMean': secondsInvestedDurationsMean, \n 'secondsInvestedDurationsMin': secondsInvestedDurationsMin, 'secondsInvestedDurationsMax': secondsInvestedDurationsMax, \n 'secondsInvestedDurationsStdev': secondsInvestedDurationsStdev, 'secondsInvestedDurationsMedian': secondsInvestedDurationsMedian, \n 'secondsInvestedDurationsMode': secondsInvestedDurationsMode, \n 'secondsReturnedDurationsMean': secondsReturnedDurationsMean, \n 'secondsReturnedDurationsMin': secondsReturnedDurationsMin, 'secondsReturnedDurationsMax': secondsReturnedDurationsMax, \n 'secondsReturnedDurationsStdev': secondsReturnedDurationsStdev, 'secondsReturnedDurationsMedian': secondsReturnedDurationsMedian, \n 'secondsReturnedDurationsMode': secondsReturnedDurationsMode, \n 'usersTrustInvestmentSlope': usersTrustInvestmentSlope, \n 'usersTrustInvestmentIntercept':usersTrustInvestmentIntercept, 'usersTrustInvestmentR_value': usersTrustInvestmentR_value, \n 'usersTrustInvestmentP_value': usersTrustInvestmentP_value, 'usersTrustInvestmentStd_err': usersTrustInvestmentStd_err,\n 'usersInvestmentDurationsSlope': usersInvestmentDurationsSlope, \n 'usersInvestmentDurationsIntercept':usersInvestmentDurationsIntercept, 'usersInvestmentDurationsR_value': usersInvestmentDurationsR_value, \n 'usersInvestmentDurationsP_value': usersInvestmentDurationsP_value, 'usersInvestmentDurationsStd_err': usersInvestmentDurationsStd_err,\n 'usersTrustreturnedSlope': usersTrustreturnedSlope,\n 'usersTrustreturnedIntercept': usersTrustreturnedIntercept,\n 'usersTrustreturnedR_value': usersTrustreturnedR_value,\n 'usersTrustreturnedP_value': usersTrustreturnedP_value,\n 'usersTrustreturnedStd_err': usersTrustreturnedStd_err,\n }\n\n elif page == \"Comments\":\n comments = []\n\n for user in allUsers:\n userComments = user.thankyou_set.all()\n if len(userComments) > 0:\n userComments = userComments[0]\n userComment = []\n userComment.append(str(userComments.user))\n userComment.append(userComments.pretestComment)\n userComment.append(userComments.trainingComment)\n userComment.append(userComments.gamesComment)\n comments.append(userComment)\n\n context = { 'umid': umid, 'allUsers': allUsers, 'welcomepage': 1, 'page': page, 'version': version, \n 'comments': comments, \n }\n\n\n return render(request, 'games/Results.html', context)\n\n context = 
{ 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n # except:\n # context = { 'umid': '', 'welcomepage': 1 }\n # return render(request, 'games/Welcome.html', context)\n\nclass Echo(object):\n \"\"\"An object that implements just the write method of the file-like\n interface.\n \"\"\"\n def write(self, value):\n \"\"\"Write the value by returning it, instead of storing in a buffer.\"\"\"\n return value\n\ndef downloadCSV(request, experiment = \"\", part = \"\"):\n if (('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\") or \n (request.session.get('umid', False) and request.session['umid'] != \"\")):\n \"\"\"A view that streams a large CSV file.\"\"\"\n if ('REMOTE_USER' in request.META and request.META['REMOTE_USER'] != \"\"):\n umid = request.META['REMOTE_USER']\n if (request.session.get('umid', False) and request.session['umid'] != \"\"):\n umid = request.session['umid']\n if (umid in adminsUniquenames):\n pseudo_buffer = Echo()\n writer = csv.writer(pseudo_buffer)\n\n print(\"Experiment = \" + experiment)\n\n rows = generateCSVDataset(experiment, part)\n\n response = StreamingHttpResponse((writer.writerow(row) for row in rows),\n content_type=\"text/csv\")\n response['Content-Disposition'] = 'attachment; filename=\"PhishingDataset.csv\"'\n return response\n\n context = { 'umid': '', 'welcomepage': 1 }\n return render(request, 'games/Welcome.html', context)\n\ndef generateCSVDataset(experiment, part):\n rows = []\n if experiment == \"\":\n rows.append(['Username', 'version', 'Experiment Earning', \n 'First Game', 'Second Game', 'Third Game', 'Opted Out', 'Postponed', \n 'age', 'gender', 'emailsperday', 'ownpc', 'ownsmartphone', 'owntablet', \n 'ownotherdevice', 'otherdevice', 'internetuse', \n 'fullname', 'street', 'city', 'state', 'zipcode', 'yearsofeduction', 'ethnicity', \n 'maritalstatus', 'Started Study', 'finishedstudy', \n 'Pretest Question 1', 'Pretest Question 2', \n 'Pretest Question 3', 'Pretest Question 4', \n 'Pretest Question 5', 'Pretest Question 6', \n 'Pretest Question 7', \n 'Pretest 1 Answer was Correct (1) / Wrong (2)', 'Pretest 2 Answer was Correct (1) / Wrong (2)', \n 'Pretest 3 Answer was Correct (1) / Wrong (2)', 'Pretest 4 Answer was Correct (1) / Wrong (2)', \n 'Pretest 5 Answer was Correct (1) / Wrong (2)', 'Pretest 6 Answer was Correct (1) / Wrong (2)', \n 'Pretest 7 Answer was Correct (1) / Wrong (2)', \n 'Pretest Question Clicked 1', 'Pretest Question Clicked 2', \n 'Pretest Question Clicked 3', 'Pretest Question Clicked 4', \n 'Pretest Question Clicked 5', 'Pretest Question Clicked 6', \n 'Pretest Question Clicked 7', \n 'Pretest Question Right Clicked 1', 'Pretest Question Right Clicked 2', \n 'Pretest Question Right Clicked 3', 'Pretest Question Right Clicked 4', \n 'Pretest Question Right Clicked 5', 'Pretest Question Right Clicked 6', \n 'Pretest Question Right Clicked 7', \n 'Pretest Question Hovered 1', 'Pretest Question Hovered 2', \n 'Pretest Question Hovered 3', 'Pretest Question Hovered 4', \n 'Pretest Question Hovered 5', 'Pretest Question Hovered 6', \n 'Pretest Question Hovered 7', \n 'Pretest Question Hovered Duration in Seconds 1', 'Pretest Question Hovered Duration in Seconds 2', \n 'Pretest Question Hovered Duration in Seconds 3', 'Pretest Question Hovered 
Duration in Seconds 4', \n 'Pretest Question Hovered Duration in Seconds 5', 'Pretest Question Hovered Duration in Seconds 6', \n 'Pretest Question Hovered Duration in Seconds 7', \n 'Pretest Started Question 1', 'Pretest Finished Question 1', \n 'Pretest Started Question 2', 'Pretest Finished Question 2', \n 'Pretest Started Question 3', 'Pretest Finished Question 3', \n 'Pretest Started Question 4', 'Pretest Finished Question 4', \n 'Pretest Started Question 5', 'Pretest Finished Question 5', \n 'Pretest Started Question 6', 'Pretest Finished Question 6', \n 'Pretest Started Question 7', 'Pretest Finished Question 7', \n 'Training Question 1', 'Training Question 2', \n 'Training Question 3', \n 'Training 1 Answer was Correct (1) / Wrong (2)', 'Training 2 Answer was Correct (1) / Wrong (2)', \n 'Training 3 Answer was Correct (1) / Wrong (2)', \n 'Training Question Clicked 1', 'Training Question Clicked 2', \n 'Training Question Clicked 3', \n 'Training Question Right Clicked 1', 'Training Question Right Clicked 2', \n 'Training Question Right Clicked 3', \n 'Training Question Hovered 1', 'Training Question Hovered 2', \n 'Training Question Hovered 3', \n 'Training Question Hovered Duration in Seconds 1', 'Training Question Hovered Duration in Seconds 2', \n 'Training Question Hovered Duration in Seconds 3', \n 'Training Started Question 1', 'Training Finished Question 1', \n 'Training Started Question 2', 'Training Finished Question 2', \n 'Training Started Question 3', 'Training Finished Question 3', \n 'Lottery Decision', 'Lottery Option1', 'Lottery Option2',\n 'Lottery Option3', 'Lottery Option4', 'Lottery Option5', 'Lottery Option6', 'Lottery Option7',\n 'Lottery Option8', 'Lottery Option9', 'Lottery Option10', 'Lottery Die1', 'Lottery Die2',\n 'Lottery Die3', 'Lottery Die4', 'Lottery Die5', 'Lottery Die6', 'Lottery Die7', 'Lottery Die8',\n 'Lottery Die9', 'Lottery Die10', 'Lottery Original Points', 'Lottery Points',\n \"Lottery Subject's Willingness\", 'Lottery Random Willingness', 'Lottery Started',\n 'Lottery Finished', \n 'Gamble Chosen', 'Gamble Coin 1', 'Gamble Coin 2',\n 'Gamble Coin 3', 'Gamble Coin 4', 'Gamble Coin 5', 'Gamble Coin 6', 'Gamble Coin 7',\n 'Gamble Coin 8', 'Gamble Coin 9', 'Gamble Points Earned',\n \"Gamble Subject's Willingness\", 'Gamble Random Willingness', 'Gamble Started',\n 'Gamble Finished', \n 'Trust Game Invested', 'Trust Game Returned 0', 'Trust Game Returned 1', 'Trust Game Returned 2',\n 'Trust Game Returned 3', 'Trust Game Returned 4', 'Trust Game Returned 5', 'Trust Game Played With',\n 'Trust Game Other Player Returned', 'Trust Game Other Player Invested', 'Trust Game Points Earned', \n 'Trust Game Started Investment', 'Trust Game Finished Investment', \n 'Trust Game Started Return 0', 'Trust Game Finished Return 0', \n 'Trust Game Started Return 1', 'Trust Game Finished Return 1', \n 'Trust Game Started Return 2', 'Trust Game Finished Return 2', \n 'Trust Game Started Return 3', 'Trust Game Finished Return 3', \n 'Trust Game Started Return 4', 'Trust Game Finished Return 4', \n 'Trust Game Started Return 5', 'Trust Game Finished Return 5', \n 'Survey Pretest Comment', 'Survey Training Comment', 'Survey Games Comment',\n ])\n\n allUsers = User.objects.all()\n\n for user in allUsers:\n row = []\n row.append(user.username)\n row.append(user.version)\n row.append(user.experimentearning)\n row.append(user.firstgame)\n row.append(user.secondgame)\n row.append(user.thirdgame)\n row.append(user.optout)\n row.append(user.postpone)\n 
row.append(user.age)\n row.append(user.gender)\n row.append(user.emailsperday)\n row.append(user.ownpc)\n row.append(user.ownsmartphone)\n row.append(user.ownpda)\n row.append(user.ownotherdevice)\n row.append(user.otherdevice)\n row.append(user.internetuse)\n row.append(user.fullname)\n row.append(user.street)\n row.append(user.city)\n row.append(user.state)\n row.append(user.zipcode)\n row.append(user.yearsofeduction)\n row.append(user.ethnicity)\n row.append(user.maritalstatus)\n row.append(user.startedstudy)\n row.append(user.finishedstudy)\n\n if user.pretest_set.count() != 0:\n pretest = user.pretest_set.all()[0]\n row.append(pretest.question1)\n row.append(pretest.question2)\n row.append(pretest.question3)\n row.append(pretest.question4)\n row.append(pretest.question5)\n row.append(pretest.question6)\n row.append(pretest.question7)\n row.append(pretest.correct1)\n row.append(pretest.correct2)\n row.append(pretest.correct3)\n row.append(pretest.correct4)\n row.append(pretest.correct5)\n row.append(pretest.correct6)\n row.append(pretest.correct7)\n row.append(pretest.questionclicked1)\n row.append(pretest.questionclicked2)\n row.append(pretest.questionclicked3)\n row.append(pretest.questionclicked4)\n row.append(pretest.questionclicked5)\n row.append(pretest.questionclicked6)\n row.append(pretest.questionclicked7)\n row.append(pretest.questionrightclicked1)\n row.append(pretest.questionrightclicked2)\n row.append(pretest.questionrightclicked3)\n row.append(pretest.questionrightclicked4)\n row.append(pretest.questionrightclicked5)\n row.append(pretest.questionrightclicked6)\n row.append(pretest.questionrightclicked7)\n row.append(pretest.questionhovered1)\n row.append(pretest.questionhovered2)\n row.append(pretest.questionhovered3)\n row.append(pretest.questionhovered4)\n row.append(pretest.questionhovered5)\n row.append(pretest.questionhovered6)\n row.append(pretest.questionhovered7)\n row.append(pretest.questionhoveredseconds1)\n row.append(pretest.questionhoveredseconds2)\n row.append(pretest.questionhoveredseconds3)\n row.append(pretest.questionhoveredseconds4)\n row.append(pretest.questionhoveredseconds5)\n row.append(pretest.questionhoveredseconds6)\n row.append(pretest.questionhoveredseconds7)\n row.append(pretest.startedquestion1)\n row.append(pretest.finishedquestion1)\n row.append(pretest.startedquestion2)\n row.append(pretest.finishedquestion2)\n row.append(pretest.startedquestion3)\n row.append(pretest.finishedquestion3)\n row.append(pretest.startedquestion4)\n row.append(pretest.finishedquestion4)\n row.append(pretest.startedquestion5)\n row.append(pretest.finishedquestion5)\n row.append(pretest.startedquestion6)\n row.append(pretest.finishedquestion6)\n row.append(pretest.startedquestion7)\n row.append(pretest.finishedquestion7)\n else:\n for index in range(56):\n row.append(\"\")\n\n if user.training_set.count() != 0:\n training = user.training_set.all()[0]\n row.append(training.question1)\n row.append(training.question2)\n row.append(training.question3)\n row.append(training.correct1)\n row.append(training.correct2)\n row.append(training.correct3)\n row.append(training.questionclicked1)\n row.append(training.questionclicked2)\n row.append(training.questionclicked3)\n row.append(training.questionrightclicked1)\n row.append(training.questionrightclicked2)\n row.append(training.questionrightclicked3)\n row.append(training.questionhovered1)\n row.append(training.questionhovered2)\n row.append(training.questionhovered3)\n row.append(training.questionhoveredseconds1)\n 
row.append(training.questionhoveredseconds2)\n row.append(training.questionhoveredseconds3)\n row.append(training.startedquestion1)\n row.append(training.finishedquestion1)\n row.append(training.startedquestion2)\n row.append(training.finishedquestion2)\n row.append(training.startedquestion3)\n row.append(training.finishedquestion3)\n else:\n for index in range(24):\n row.append(\"\")\n\n if user.holtlaury_set.count() != 0:\n holtLaury = user.holtlaury_set.all()[0]\n row.append(holtLaury.decision)\n row.append(holtLaury.option1)\n row.append(holtLaury.option2)\n row.append(holtLaury.option3)\n row.append(holtLaury.option4)\n row.append(holtLaury.option5)\n row.append(holtLaury.option6)\n row.append(holtLaury.option7)\n row.append(holtLaury.option8)\n row.append(holtLaury.option9)\n row.append(holtLaury.option10)\n row.append(holtLaury.die1)\n row.append(holtLaury.die2)\n row.append(holtLaury.die3)\n row.append(holtLaury.die4)\n row.append(holtLaury.die5)\n row.append(holtLaury.die6)\n row.append(holtLaury.die7)\n row.append(holtLaury.die8)\n row.append(holtLaury.die9)\n row.append(holtLaury.die10)\n row.append(holtLaury.originalPoints)\n row.append(holtLaury.points)\n row.append(holtLaury.willingness)\n row.append(holtLaury.willingnessRand)\n row.append(holtLaury.started)\n row.append(holtLaury.finished)\n else:\n for index in range(27):\n row.append(\"\")\n\n if user.gamble_set.count() != 0:\n gamble = user.gamble_set.all()[0]\n row.append(gamble.chosen)\n row.append(gamble.coin1)\n row.append(gamble.coin2)\n row.append(gamble.coin3)\n row.append(gamble.coin4)\n row.append(gamble.coin5)\n row.append(gamble.coin6)\n row.append(gamble.coin7)\n row.append(gamble.coin8)\n row.append(gamble.coin9)\n row.append(gamble.points)\n row.append(gamble.willingness)\n row.append(gamble.willingnessRand)\n row.append(gamble.started)\n row.append(gamble.finished)\n else:\n for index in range(15):\n row.append(\"\")\n\n if user.investment_set.count() != 0:\n investment = user.investment_set.all()[0]\n row.append(investment.invested)\n row.append(investment.returned0)\n row.append(investment.returned1)\n row.append(investment.returned2)\n row.append(investment.returned3)\n row.append(investment.returned4)\n row.append(investment.returned5)\n row.append(investment.otheruser)\n row.append(investment.otherreturned)\n row.append(investment.otherinvested)\n row.append(investment.points)\n row.append(investment.startedinvested)\n row.append(investment.finishedinvested)\n row.append(investment.startedreturned0)\n row.append(investment.finishedreturned0)\n row.append(investment.startedreturned1)\n row.append(investment.finishedreturned1)\n row.append(investment.startedreturned2)\n row.append(investment.finishedreturned2)\n row.append(investment.startedreturned3)\n row.append(investment.finishedreturned3)\n row.append(investment.startedreturned4)\n row.append(investment.finishedreturned4)\n row.append(investment.startedreturned5)\n row.append(investment.finishedreturned5)\n else:\n for index in range(25):\n row.append(\"\")\n\n if user.thankyou_set.count() != 0:\n thankyou = user.thankyou_set.all()[0]\n row.append(thankyou.pretestComment)\n row.append(thankyou.trainingComment)\n row.append(thankyou.gamesComment)\n else:\n for index in range(3):\n row.append(\"\")\n\n rows.append(row)\n\n elif experiment == \"Pilot\":\n rows.append(['Username', \n 'Trust Game Invested', 'Trust Game Returned 0', 'Trust Game Returned 1', 'Trust Game Returned 2',\n 'Trust Game Returned 3', 'Trust Game Returned 4', 'Trust Game Returned 
5', 'Trust Game Played With',\n 'Trust Game Other Player Returned', 'Trust Game Other Player Invested', 'Trust Game Points Earned', \n 'Trust Game Started Investment', 'Trust Game Finished Investment', \n 'Trust Game Started Return 0', 'Trust Game Finished Return 0', \n 'Trust Game Started Return 1', 'Trust Game Finished Return 1', \n 'Trust Game Started Return 2', 'Trust Game Finished Return 2', \n 'Trust Game Started Return 3', 'Trust Game Finished Return 3', \n 'Trust Game Started Return 4', 'Trust Game Finished Return 4', \n 'Trust Game Started Return 5', 'Trust Game Finished Return 5', \n ])\n\n allUsers = User.objects.filter(version = 'Pilot')\n\n for user in allUsers:\n row = []\n row.append(user.username)\n\n if user.investment_set.count() != 0:\n investment = user.investment_set.all()[0]\n row.append(investment.invested)\n row.append(investment.returned0)\n row.append(investment.returned1)\n row.append(investment.returned2)\n row.append(investment.returned3)\n row.append(investment.returned4)\n row.append(investment.returned5)\n row.append(investment.otheruser)\n row.append(investment.otherreturned)\n row.append(investment.otherinvested)\n row.append(investment.points)\n row.append(investment.startedinvested)\n row.append(investment.finishedinvested)\n row.append(investment.startedreturned0)\n row.append(investment.finishedreturned0)\n row.append(investment.startedreturned1)\n row.append(investment.finishedreturned1)\n row.append(investment.startedreturned2)\n row.append(investment.finishedreturned2)\n row.append(investment.startedreturned3)\n row.append(investment.finishedreturned3)\n row.append(investment.startedreturned4)\n row.append(investment.finishedreturned4)\n row.append(investment.startedreturned5)\n row.append(investment.finishedreturned5)\n else:\n for index in range(25):\n row.append(\"\")\n\n rows.append(row)\n\n return rows\n","sub_path":"UMSecurity/games/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":140962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"27727342","text":"import os\nimport textwrap\n\nimport requests\nfrom laim import Laim\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\n\nclass SlackHandler(Laim):\n\n def __init__(self):\n super().__init__()\n self.session = requests.Session()\n self.session.mount('https://', HTTPAdapter(max_retries=Retry(\n total=6,\n status_forcelist=(429, 500, 502, 503, 504),\n allowed_methods=('GET', 'HEAD', 'POST', 'DELETE', 'PUT', 'OPTIONS', 'TRACE'),\n backoff_factor=2,\n )))\n self.session.headers.update({\n 'Authorization': 'Bearer %s' % self.config['slack-token'],\n # Explicitly set charset to avoid warnings from slack\n 'Content-Type': 'application/json; charset=utf-8',\n })\n\n self.channel_id = self.config['slack-channel-id']\n self.hostname = '{{ grains.id }}'\n\n\n def handle_message(self, sender, recipients, message):\n response = self.session.post('https://slack.com/api/chat.postMessage',\n timeout=60,\n json={\n 'channel': self.channel_id,\n 'text': textwrap.dedent('''\\\n `%s` received mail for %s\n *From*: %s\n *To*: %s\n *Subject*: %s\n\n %s\n ''') % (\n self.hostname,\n ', '.join(recipients),\n message.get('From'),\n message.get('To'),\n message.get('Subject'),\n message.get_payload(),\n ),\n },\n )\n body = response.json()\n if not body['ok']:\n raise ValueError('Failed to forward mail to slack, got %r', body)\n\n\nif __name__ == '__main__':\n handler = SlackHandler()\n 
handler.run()\n","sub_path":"salt/laim-slack/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"400251687","text":"# Copyright 2017 AT&T Corporation.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport yaml\n\nfrom oslo_log import log as logging\n\nfrom tempest.lib import exceptions\n\nfrom patrole_tempest_plugin.rbac_utils import RbacAuthority\n\nLOG = logging.getLogger(__name__)\n\n\nclass RequirementsParser(object):\n _inner = None\n\n class Inner(object):\n _rbac_map = None\n\n def __init__(self, filepath):\n with open(filepath) as f:\n RequirementsParser.Inner._rbac_map = \\\n list(yaml.safe_load_all(f))\n\n def __init__(self, filepath):\n if RequirementsParser._inner is None:\n RequirementsParser._inner = RequirementsParser.Inner(filepath)\n\n @staticmethod\n def parse(component):\n try:\n for section in RequirementsParser.Inner._rbac_map:\n if component in section:\n return section[component]\n except yaml.parser.ParserError:\n LOG.error(\"Error while parsing the requirements YAML file. Did \"\n \"you pass a valid component name from the test case?\")\n return None\n\n\nclass RequirementsAuthority(RbacAuthority):\n def __init__(self, filepath=None, component=None):\n if filepath is not None and component is not None:\n self.roles_dict = RequirementsParser(filepath).parse(component)\n else:\n self.roles_dict = None\n\n def allowed(self, rule_name, role):\n if self.roles_dict is None:\n raise exceptions.InvalidConfiguration(\n \"Roles dictionary parsed from requirements YAML file is \"\n \"empty. 
Ensure the requirements YAML file is correctly \"\n \"formatted.\")\n try:\n _api = self.roles_dict[rule_name]\n return role in _api\n except KeyError:\n raise KeyError(\"'%s' API is not defined in the requirements YAML \"\n \"file\" % rule_name)\n","sub_path":"patrole_tempest_plugin/requirements_authority.py","file_name":"requirements_authority.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"208101599","text":"import boto3\nimport tweepy\n\nprint('StreamingService Lambda Function Initiated')\n\nconsumer_key = ''\nconsumer_secret = ''\naccess_token = ''\naccess_token_secret = ''\n\nsqs = boto3.resource('sqs')\nqueue = sqs.get_queue_by_name(QueueName='tweets-queue')\n\nclass StreamListener(tweepy.StreamListener):\n\t\n\tdef __init__(self, api):\n\t\tself.api = api\n\t\tsuper(tweepy.StreamListener, self).__init__()\n\n\tdef on_data(self, tweet):\n\t\tqueue.send_message(MessageBody=tweet)\n\t\treturn True\n\n\tdef on_error(self, status_code):\n\t\tprint(\"status_code = \",status_code)\n\t\tif status_code == 420:\n\t\t\treturn False\n\ndef twitter_stream():\n\tauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\tauth.set_access_token(access_token, access_token_secret)\n\tapi = tweepy.API(auth)\n\tstream_listener = StreamListener(api)\n\tstream = tweepy.Stream(auth=api.auth, listener=stream_listener)\n\tstream.filter(locations=[-180,-90,180,90], languages=['en'])\n\ndef lambda_handler(event, context):\n\tprint(\"Streaming Service Started\")\n\ttwitter_stream()","sub_path":"Lambda Functions/streaming_service.py","file_name":"streaming_service.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"35246419","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/electrum_chi/electrum/gui/qt/configure_name_dialog.py\n# Compiled at: 2019-08-24 06:06:43\n# Size of source mod 2**32: 8380 bytes\nimport sys, traceback\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom electrum.bitcoin import TYPE_ADDRESS\nfrom electrum.commands import NameAlreadyExistsError\nfrom electrum.i18n import _\nfrom electrum.names import format_name_identifier\nfrom electrum.network import TxBroadcastError, BestEffortRequestFailed\nfrom electrum.util import NotEnoughFunds, NoDynamicFeeEstimates\nfrom electrum.wallet import InternalAddressCorruption\nfrom .paytoedit import PayToEdit\ndialogs = []\n\ndef show_configure_name(identifier, value, parent, is_new):\n d = ConfigureNameDialog(identifier, value, parent, is_new)\n dialogs.append(d)\n d.show()\n\n\nclass ConfigureNameDialog(QDialog):\n\n def __init__(self, identifier, value, parent, is_new):\n QDialog.__init__(self, parent=None)\n self.main_window = parent\n self.setMinimumWidth(545)\n self.setMinimumHeight(245)\n if is_new:\n self.setWindowTitle(_('Configure New Name'))\n else:\n self.setWindowTitle(_('Reconfigure Name'))\n form_layout = QFormLayout()\n self.identifier = identifier\n formatted_name = format_name_identifier(identifier)\n form_layout.addRow(QLabel(formatted_name))\n self.dataEdit = QLineEdit()\n self.dataEdit.setText(value.decode('ascii'))\n form_layout.addRow(_('Data:'), self.dataEdit)\n 
self.transferTo = PayToEdit(self.main_window)\n form_layout.addRow(_('Transfer to:'), self.transferTo)\n form = QWidget()\n form.setLayout(form_layout)\n self.buttons_box = QDialogButtonBox()\n self.buttons_box.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)\n buttons_hbox = QHBoxLayout()\n buttons_hbox.addStretch()\n buttons_hbox.addWidget(self.buttons_box)\n buttons = QWidget()\n buttons.setLayout(buttons_hbox)\n vbox = QVBoxLayout()\n vbox.addWidget(form)\n vbox.addWidget(buttons)\n self.setLayout(vbox)\n self.buttons_box.accepted.connect(self.accept)\n self.buttons_box.rejected.connect(self.reject)\n if is_new:\n self.accepted.connect(lambda : self.register_and_broadcast(self.identifier, self.dataEdit.text().encode('ascii'), self.transferTo))\n else:\n self.accepted.connect(lambda : self.update_and_broadcast(self.identifier, self.dataEdit.text().encode('ascii'), self.transferTo))\n\n def register_and_broadcast(self, identifier, value, transfer_to):\n if transfer_to.toPlainText() == '':\n recipient_address = None\n else:\n recipient = transfer_to.get_recipient()\n if recipient is None:\n recipient_type, recipient_address = None, transfer_to.toPlainText()\n else:\n recipient_type, recipient_address = recipient\n if recipient_type != TYPE_ADDRESS:\n self.main_window.show_error(_('Invalid address ') + recipient_address)\n return\n else:\n name_register = self.main_window.console.namespace.get('name_register')\n broadcast = self.main_window.console.namespace.get('broadcast')\n try:\n tx = name_register(identifier.decode('utf-8'), value.decode('ascii'), recipient_address)['hex']\n except NameAlreadyExistsError as e:\n try:\n formatted_name = format_name_identifier(identifier)\n self.main_window.show_message(_('Error registering ') + formatted_name + ': ' + str(e))\n return\n finally:\n e = None\n del e\n\n except (NotEnoughFunds, NoDynamicFeeEstimates) as e:\n try:\n formatted_name = format_name_identifier(identifier)\n self.main_window.show_message(_('Error registering ') + formatted_name + ': ' + str(e))\n return\n finally:\n e = None\n del e\n\n except InternalAddressCorruption as e:\n try:\n formatted_name = format_name_identifier(identifier)\n self.main_window.show_error(_('Error registering ') + formatted_name + ': ' + str(e))\n raise\n finally:\n e = None\n del e\n\n except BaseException as e:\n try:\n traceback.print_exc(file=(sys.stdout))\n formatted_name = format_name_identifier(identifier)\n self.main_window.show_message(_('Error registering ') + formatted_name + ': ' + str(e))\n return\n finally:\n e = None\n del e\n\n try:\n broadcast(tx)\n except Exception as e:\n try:\n formatted_name = format_name_identifier(identifier)\n self.main_window.show_error(_('Error broadcasting registration for ') + formatted_name + ': ' + str(e))\n return\n finally:\n e = None\n del e\n\n def update_and_broadcast(self, identifier, value, transfer_to):\n if transfer_to.toPlainText() == '':\n recipient_address = None\n else:\n recipient = transfer_to.get_recipient()\n if recipient is None:\n recipient_type, recipient_address = None, transfer_to.toPlainText()\n else:\n recipient_type, recipient_address = recipient\n if recipient_type != TYPE_ADDRESS:\n self.main_window.show_error(_('Invalid address ') + recipient_address)\n return\n else:\n name_update = self.main_window.console.namespace.get('name_update')\n broadcast = self.main_window.console.namespace.get('broadcast')\n try:\n tx = name_update(identifier.decode('utf-8'), value.decode('ascii'), recipient_address)['hex']\n except (NotEnoughFunds, NoDynamicFeeEstimates) as e:\n try:\n 
formatted_name = format_name_identifier(identifier)\n self.main_window.show_message(_('Error creating update for ') + formatted_name + ': ' + str(e))\n return\n finally:\n e = None\n del e\n\n except InternalAddressCorruption as e:\n try:\n formatted_name = format_name_identifier(identifier)\n self.main_window.show_error(_('Error creating update for ') + formatted_name + ': ' + str(e))\n raise\n finally:\n e = None\n del e\n\n except BaseException as e:\n try:\n traceback.print_exc(file=(sys.stdout))\n formatted_name = format_name_identifier(identifier)\n self.main_window.show_message(_('Error creating update for ') + formatted_name + ': ' + str(e))\n return\n finally:\n e = None\n del e\n\n try:\n broadcast(tx)\n except Exception as e:\n try:\n formatted_name = format_name_identifier(identifier)\n self.main_window.show_error(_('Error broadcasting update for ') + formatted_name + ': ' + str(e))\n return\n finally:\n e = None\n del e","sub_path":"pycfiles/Electrum_CHI-3.3.8-py3.7/configure_name_dialog.cpython-37.py","file_name":"configure_name_dialog.cpython-37.py","file_ext":"py","file_size_in_byte":8157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"418428044","text":"palavra = input('Enter a word: ')\nL = list(palavra)\nV = []\nC = []\n\nfor i in L:\n if i in 'AEIOUaeiou':\n V.append(i)\n else:\n C.append(i)\nprint('VOWELS:',V)\nprint('CONSONANTS: ',C)","sub_path":"2UNIDADE/LISTA 3/Q17.py","file_name":"Q17.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"363361521","text":"import json\nimport requests\nfrom datetime import datetime\n\nfrom flask import current_app\n\n\ndef get_auth():\n return (\n current_app.config.get('BDR_ENDPOINT_USER', 'user'),\n current_app.config.get('BDR_ENDPOINT_PASSWORD', 'pass'),\n )\n\n\ndef get_absolute_url(url):\n return current_app.config['BDR_ENDPOINT_URL'] + url\n\n\ndef do_bdr_request(params, relative_url):\n url = get_absolute_url(relative_url)\n auth = get_auth()\n ssl_verify = current_app.config['HTTPS_VERIFY']\n\n error_message = ''\n response = None\n try:\n response = requests.get(url, params=params, auth=auth,\n verify=ssl_verify)\n except requests.ConnectionError:\n error_message = 'BDR was unreachable - {}'.format(datetime.now())\n\n if response is None:\n # error_message was already set by the ConnectionError handler above\n pass\n elif response.headers.get(\n 'content-type') == 'application/json':\n json_data = json.loads(response.content)\n if json_data.get('status') != 'success':\n error_message = json_data.get('message')\n elif response.status_code != 200:\n error_message = 'Invalid status code: ' + str(response.status_code)\n else:\n error_message = 'Invalid response: ' + str(response)\n\n if error_message:\n current_app.logger.warning(error_message)\n if 'sentry' in current_app.extensions:\n current_app.extensions['sentry'].captureMessage(error_message)\n\n return not error_message\n\n\ndef call_bdr(undertaking, old_collection=False):\n if not current_app.config.get('BDR_ENDPOINT_URL'):\n current_app.logger.warning('No bdr endpoint. 
No bdr call.')\n return True\n params = {\n 'company_id': undertaking.external_id,\n 'domain': undertaking.domain,\n 'country': undertaking.country_code,\n 'name': undertaking.name\n }\n if old_collection:\n params['old_collection_id'] = undertaking.oldcompany_account\n\n relative_url = '/ReportekEngine/update_company_collection'\n\n return do_bdr_request(params, relative_url)\n","sub_path":"fcs/sync/bdr.py","file_name":"bdr.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"521862839","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import fsolve\n\nx = np.arange(-10, 10, 0.1)\n\nplt.plot(x, x)\nplt.plot(x, np.sin(x))\nplt.show()\n\ndef f(x):\n return x-np.cos(x)\n\ndef f_der(x):\n return 1+np.sin(x)\n\ndef binary_search(a, b):\n c = 0\n while np.abs(a-b) > 10**(-12):\n c = (a+b)/2\n if f(c) < 0:\n a = c\n elif f(c) > 0:\n b = c\n return \"With interval halving (bisection) the solution is x=\" + str(c) + \" with error: \" + str((10**-(12))/2)\n\ndef newton_raphsons(x):\n felterm = f(x)/f_der(x)\n x = x-felterm\n while np.abs(x-(x-felterm)) > 10**(-12):\n felterm = f(x)/f_der(x)\n x = x-felterm\n return \"With Newton-Raphson the solution is x=\" + str(x) + \" with error: \" + str(felterm)\n\n\nprint(binary_search(-1, 1))\n\nprint(newton_raphsons(-1))\n\n#d) Interval halving requires the most iterations\n\n\n\n","sub_path":"Labbar/Num1/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"190507128","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Sabina Langås'\n__email__ = 'sabinal@nmbu.no'\n\n\nclass ListRand:\n\n def __init__(self, list_numbers):\n self.random_number = list_numbers.copy()\n self.next = 0\n\n def rand(self):\n if self.next >= len(self.random_number):\n raise RuntimeError(\"The last number has been delivered\")\n\n number = self.random_number[self.next]\n self.next += 1\n\n return number\n\n\nclass LCGRand:\n a = 7 ** 5\n m = 2 ** 31 - 1\n\n def __init__(self, seed):\n self.previous = seed\n\n def rand(self):\n self.previous = LCGRand.a * self.previous % LCGRand.m\n\n return self.previous\n\n\nif __name__ == '__main__':\n\n list_of_numbers_test = ListRand([5, 3, 4, 6, 8, 9])\n lcg_test = LCGRand(5)\n\n for i in range(6):\n print('Random number from LCGRand {} and from ListRand {}'.format(\n lcg_test.rand(), list_of_numbers_test.rand()))\n","sub_path":"src/sabina_langas_ex/ex04/myrand.py","file_name":"myrand.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"303839413","text":"import os\nf = open(os.path.expanduser(\"~/Desktop/karthik.txt\"))\ns = f.read().split()\nf = sorted(s)\nf1 = f[0]\nf2 = f[1:]\n\nl = len(f1)\nm = ''\nfor i in range(0, l):\n for j in range(l, i + len(m), -1):\n s1 = f1[i:j]\n\n matched_all = True\n for s2 in f2:\n if s1 not in s2:\n matched_all = False\n break\n\n if matched_all:\n m = s1\n break\n\nprint(m)","sub_path":"LCSM.py","file_name":"LCSM.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"348818902","text":"import asyncio\nimport json\nimport os\nimport random\nimport re\nimport traceback\nfrom datetime import datetime\n\nimport discord\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\n\nfrom commands import 
Main\n\nCANVAS_COLOR = 0xe13f2b\nCANVAS_THUMBNAIL_URL = \"https://lh3.googleusercontent.com/2_M-EEPXb2xTMQSTZpSUefHR3TjgOCsawM3pjVG47jI-BrHoXGhKBpdEHeLElT95060B=s180\"\n\nload_dotenv()\nCS221BOT_KEY = os.getenv(\"CS221BOT_KEY\")\n\nbot = commands.Bot(command_prefix=\"!\", help_command=None, intents=discord.Intents.all())\n\n\ndef loadJSON(jsonfile):\n with open(jsonfile, \"r\") as f:\n b = json.load(f)\n return json.loads(b)\n\n\ndef writeJSON(data, jsonfile):\n b = json.dumps(data)\n with open(jsonfile, \"w\") as f:\n json.dump(b, f)\n\n\nasync def status_task():\n await bot.wait_until_ready()\n\n while not bot.is_closed():\n online_members = {member for guild in bot.guilds for member in guild.members if not member.bot and member.status != discord.Status.offline}\n\n play = [\"with the \\\"help\\\" command\", \" \", \"with your mind\", \"ƃuᴉʎɐlԀ\", \"...something?\",\n \"a game? Or am I?\", \"¯\\_(ツ)_/¯\", f\"with {len(online_members)} people\", \"with image manipulation\"]\n listen = [\"smart music\", \"... wait I can't hear anything\",\n \"rush 🅱\", \"C++ short course\"]\n watch = [\"TV\", \"YouTube vids\", \"over you\",\n \"how to make a bot\", \"C++ tutorials\", \"I, Robot\"]\n\n rng = random.randrange(0, 3)\n\n if rng == 0:\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=random.choice(play)))\n elif rng == 1:\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=random.choice(listen)))\n else:\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=random.choice(watch)))\n\n await asyncio.sleep(30)\n\n\ndef startup():\n try:\n bot.poll_dict = bot.loadJSON(\"data/poll.json\")\n bot.canvas_dict = bot.loadJSON(\"data/canvas.json\")\n bot.piazza_dict = bot.loadJSON(\"data/piazza.json\")\n except FileNotFoundError:\n bot.writeJSON({}, \"data/poll.json\")\n bot.poll_dict = bot.loadJSON(\"data/poll.json\")\n bot.writeJSON({}, \"data/canvas.json\")\n bot.canvas_dict = bot.loadJSON(\"data/canvas.json\")\n bot.writeJSON({}, \"data/piazza.json\")\n bot.piazza_dict = bot.loadJSON(\"data/piazza.json\")\n\n for channel in list(bot.poll_dict):\n if not bot.get_channel(int(channel)):\n del bot.poll_dict[channel]\n\n for guild in bot.guilds:\n for channel in guild.text_channels:\n if str(channel.id) not in bot.poll_dict:\n bot.poll_dict.update({str(channel.id): \"\"})\n\n bot.writeJSON(bot.poll_dict, \"data/poll.json\")\n\n Main.canvas_init(bot.get_cog(\"Main\"))\n Main.piazza_start(bot.get_cog(\"Main\"))\n\n\nasync def wipe_dms():\n guild = bot.get_guild(745503628479037492)\n\n while True:\n await asyncio.sleep(300)\n\n for channel in guild.channels:\n if channel.name.startswith(\"221dm-\"):\n async for msg in channel.history(limit=1):\n if (datetime.utcnow() - msg.created_at).total_seconds() >= 86400:\n await channel.delete()\n break\n else:\n await channel.delete()\n\n\n@bot.event\nasync def on_ready():\n startup()\n print(\"Logged in successfully\")\n bot.loop.create_task(status_task())\n bot.loop.create_task(wipe_dms())\n\n\n@bot.event\nasync def on_guild_join(guild):\n for channel in guild.text_channels:\n bot.poll_dict.update({str(channel.id): \"\"})\n bot.writeJSON(bot.poll_dict, \"data/poll.json\")\n\n\n@bot.event\nasync def on_guild_remove(guild):\n for channel in guild.channels:\n if str(channel.id) in bot.poll_dict:\n del bot.poll_dict[str(channel.id)]\n bot.writeJSON(bot.poll_dict, \"data/poll.json\")\n\n\n@bot.event\nasync def on_channel_create(channel):\n if 
isinstance(channel, discord.TextChannel):\n bot.poll_dict.update({str(channel.id): \"\"})\n bot.writeJSON(bot.poll_dict, \"data/poll.json\")\n\n\n@bot.event\nasync def on_channel_delete(channel):\n if str(channel.id) in bot.poll_dict:\n del bot.poll_dict[str(channel.id)]\n bot.writeJSON(bot.poll_dict, \"data/poll.json\")\n\n\n@bot.event\nasync def on_message_edit(before, after):\n await bot.process_commands(after)\n\n\n@bot.event\nasync def on_message(message):\n if message.author.bot:\n return\n else:\n # debugging\n # with open(\"messages.txt\", \"a\") as f:\n # \tprint(f\"{message.guild.name}: {message.channel.name}: {message.author.name}: \\\"{message.content}\\\" @ {str(datetime.datetime.now())} \\r\\n\", file = f)\n # print(message.content)\n\n # this is some weird bs happening only with android users in certain servers and idk why it happens\n # but basically the '@' is screwed up\n if re.findall(r\"<<@&457618814058758146>&?\\d{18}>\", message.content):\n new = message.content.replace(\"<@&457618814058758146>\", \"@\")\n await message.channel.send(new)\n\n await bot.process_commands(message)\n\n\nif __name__ == \"__main__\":\n bot.loadJSON = loadJSON\n bot.writeJSON = writeJSON\n bot.load_extension(\"commands\")\n print(\"commands module loaded\")\n\n\n@bot.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.CommandNotFound) or isinstance(error, discord.HTTPException) or isinstance(error, discord.NotFound):\n pass\n elif isinstance(error, commands.CommandOnCooldown):\n await ctx.send(f\"Oops! That command is on cooldown right now. Please wait **{round(error.retry_after, 3)}** seconds before trying again.\", delete_after=error.retry_after)\n elif isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(f\"The required argument(s) {error.param} is/are missing.\", delete_after=5)\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(\"This command is disabled.\", delete_after=5)\n elif isinstance(error, commands.MissingPermissions) or isinstance(error, commands.BotMissingPermissions):\n await ctx.send(error, delete_after=5)\n else:\n etype = type(error)\n trace = error.__traceback__\n\n # prints full traceback\n try:\n await ctx.send((\"```\" + \"\".join(traceback.format_exception(etype, error, trace, 999)) + \"```\").replace(\"C:\\\\Users\\\\William\\\\anaconda3\\\\lib\\\\site-packages\\\\\", \"\").replace(\"D:\\\\my file of stuff\\\\cs221bot\\\\\", \"\"))\n except:\n print((\"```\" + \"\".join(traceback.format_exception(etype, error, trace, 999)) + \"```\").replace(\"C:\\\\Users\\\\William\\\\anaconda3\\\\lib\\\\site-packages\\\\\", \"\").replace(\"D:\\\\my file of stuff\\\\cs221bot\\\\\", \"\"))\n\nbot.loop.create_task(Main.track_inotes(bot.get_cog(\"Main\")))\nbot.loop.create_task(Main.send_pupdate(bot.get_cog(\"Main\")))\nbot.loop.create_task(Main.stream_tracking(bot.get_cog(\"Main\")))\nbot.loop.create_task(Main.assignment_reminder(bot.get_cog(\"Main\")))\nbot.run(CS221BOT_KEY)\n","sub_path":"cs221bot.py","file_name":"cs221bot.py","file_ext":"py","file_size_in_byte":7157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"361549033","text":"# 구구단 출력\ndan = input('출력할 단을 입력해주세요.[2~9] ')\n# dan = 5\ndan = int(dan)\ngop = 0\n\nprint(dan, '단 출 력 \\n' + '-'*20)\n# print(dan, '단 출 력 - Case1\\n' + '-'*20)\nfor i in range(9):\n num = i + 1\n gop = dan * num\n print(dan, '*', num, '=', gop)\n\n#\n# print(dan, '단 출 력 - Case2\\n' + '-'*20)\n# for i in range(9):\n# num = i + 1\n# gop = 
dan * num\n# print('%d * %d = %d' % (dan, num, gop))\n#\n# print(dan, '단 출 력 - Case3\\n' + '-'*20)\n# for i in range(9):\n# num = i + 1\n# gop = dan * num\n# print('{} * {} = {}'.format(dan, num, gop))\n#\n# print(dan, '단 출 력 - Case4\\n' + '-'*20)\n# for i in range(9):\n# num = i + 1\n# gop = dan * num\n# print('{d} * {n} = {g}'.format(d=dan, n=num, g=gop))\n","sub_path":"Sect-A/source/sect05_project/proj10_gugudan.py","file_name":"proj10_gugudan.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"141671096","text":"'''\nCreated on 6 Jul 2014\n\n@author: wrightm\n'''\nimport unittest\nimport numpy\nimport scipy.misc\nimport matplotlib.pyplot\n\n\nclass BooleanIndexingTest(unittest.TestCase):\n\n def get_indices(self, size):\n arr = numpy.arange(size)\n return arr % 4 == 0\n \n def test(self):\n # Plot Lena\n lena1 = scipy.misc.lena().copy() \n xindices = self.get_indices(lena1.shape[0])\n yindices = self.get_indices(lena1.shape[1])\n lena1[xindices, yindices] = 0\n matplotlib.pyplot.subplot(211)\n matplotlib.pyplot.imshow(lena1)\n\n lena2 = scipy.misc.lena().copy() \n # Between quarter and 3 quarters of the max value\n lena2[(lena2 > lena2.max()/4) & (lena2 < 3 * lena2.max()/4)] = 0\n matplotlib.pyplot.subplot(212)\n matplotlib.pyplot.imshow(lena2)\n\n matplotlib.pyplot.show()\n\n \n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","sub_path":"NumpyCookbook/src/test/advanced_indexing_and_array_concepts/test_boolean_indexing.py","file_name":"test_boolean_indexing.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"300930151","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2015 dlilien \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\ngmsh support functions\n\"\"\"\nimport os\nimport numpy as np\nimport subprocess as sp\n\n\ndef extrude_DEM(in_dir, surf_dem, bed_dem, levels=3, bl=0, blp=0.1, out_dir=None, surf_info=None, bed_info=None):\n \"\"\" Wrap around the extrude mesh and MshGlacierDEM commands\n\n Parameters\n ----------\n in_dir : str\n The mesh\n surf_dem : str\n The surface DEM, formatted as a grid\n bed_dem : str\n The bed DEM, formatted as a grid\n levels : int\n The number of levels to use\n bl : int, optional\n Use a boundary layer of this many layers. Default 0.\n blp : float, optional\n The size in percentage of the boundary layer. Negative to put at top.\n out_dir : str, optional\n Output directory. Default (None) is to append _extruded to in_dir.\n \"\"\"\n if out_dir is None:\n out_dir = in_dir + '_extruded'\n if not os.path.exists(in_dir):\n raise IOError('File not found')\n if bl > levels:\n raise ValueError(\n 'Boundary layer must be smaller than number of layers')\n if surf_info is None:\n surf_info = os.path.splitext(surf_dem)[0] + '_info.dat'\n if bed_info is None:\n bed_info = os.path.splitext(bed_dem)[0] + '_info.dat'\n with open(surf_info) as s_f:\n with open(bed_info) as b_f:\n with open('mesh_input.dat', 'w') as mesh_f:\n mesh_f.write('! filename\\n')\n mesh_f.write('\"' + out_dir + '\"\\n')\n for line in s_f.readlines():\n mesh_f.write(line)\n for line in b_f.readlines():\n mesh_f.write(line)\n mesh_f.write('! Minimum ice thickness\\n')\n mesh_f.write('1.0\\n')\n mesh_f.write('! 
Number of Partitions\\n')\n mesh_f.write('1')\n\n # This is a really hacky solution...\n if os.path.exists('/usr/local/opt/elmer/lib/elmersolver'):\n os.environ[\n 'DYLD_LIBRARY_PATH'] = '/usr/local/opt/elmer/lib/elmersolver:/usr/local/lib'\n elif os.path.exists('/Users/dlilien/work/Elmer/lib'):\n os.environ['DYLD_LIBRARY_PATH'] = '/Users/dlilien/work/Elmer/lib/elmersolver:/usr/local/lib'\n print('Running Extrudemesh to get basic 3d')\n sp.Popen(['ExtrudeMesh', in_dir, out_dir, str(levels),\n '1', '1', '0', str(bl), str(blp), '0']).wait()\n print('Meshing glacier DEM')\n sp.Popen(['MshGlacierDEM']).wait()\n os.remove('mesh_input.dat')\n os.environ['DYLD_LIBRARY_PATH'] = ''\n\n\ndef divide_edges(shp, edge_name='edge'):\n \"\"\" Break up the edges listed in a shapefile based on some field\n\n Parameters\n ----------\n shp : dictionary\n a dictionary of arrays/lists like that produced :py:method:`shp2dict`\n edge_name : str\n The key of the field with information about the edges. The unique values are the edges. If used with :py:method:`gmsh_outline` then the first value overlaps with the previous edge. Default is 'edge'\n \"\"\"\n # This is likely overkill, but be careful about indexing to make sure\n # nothing gets screwed up\n edge_nums, indices = np.unique(shp[edge_name], return_index=True)\n sorted_order = np.argsort(indices)\n return [shp['coords'][shp[edge_name] == edge_nums[i]] for i in sorted_order]\n\n\ndef divide_edges_lines(shp, edge_name='gid'):\n \"\"\" Break up the edges listed in a shapefile based on some field\n\n Parameters\n ----------\n shp : dictionary\n a dictionary of arrays/lists like that produced :py:method:`shp2dict`\n edge_name : str\n The key of the field with information about the edges. The unique values are the edges. If used with :py:method:`gmsh_outline` then the first value overlaps with the previous edge. Default is 'edge'\n \"\"\"\n # This is likely overkill, but be careful about indexing to make sure\n # nothing gets screwed up\n sorted_order = np.argsort(shp[edge_name])\n return [shp['coords'][i] for i in sorted_order]\n\n\ndef gmsh_outline_simple(fname, outline, outlc):\n \"\"\"Write points to a file which then can be meshed. Deprecated, delete soon\"\"\"\n # This is going to make a somewhat messy mesh in terms of numbering. Oh\n # well\n fid = open(fname, 'w')\n fid.write('lc={:4.2f};\\n'.format(outlc.min()))\n formatspec = 'p{:d}=newp; Point(p{:d})={{{:4.3f}, {:4.3f}, 0.0, {:4.3f}}};\\n'\n for i in range(0, len(outline)):\n fid.write(\n formatspec.format(i + 1, i + 1, outline[i, 0], outline[i, 1], outlc[i, 0]))\n lx = len(outline)\n fid.write('s{:d}=newreg; Spline(s{:d})={{p{:d}'.format(1, 1, 1))\n for j in range(0, lx - 1):\n fid.write(',p{:d}'.format(j + 2))\n fid.write(',p{:d}'.format(1))\n fid.write('};\\n')\n fid.write(\n 'pl{:d}=newreg; Physical Line(pl{:d})={{s{:d}}};\\n'.format(1, 1, 1))\n\n # lineloop\n fid.write('ll1=newreg; Line Loop(ll1)={s1')\n fid.write('};\\n')\n\n fid.write('ps1=newreg; Plane Surface(ps1)={ll1')\n fid.write('};\\n')\n fid.write('ps2=newreg; Physical Surface(ps2)={ps1};\\n')\n fid.close()\n return\n\n\ndef gmsh_outline(out_fn, outline, outline_lc, cuts=None, cuts_lc=None, points=None, points_lc=None, spline_or_line='Spline'):\n \"\"\" Turn an outline into a .geo type file for gmsh, do not use splines so edges are sharp\n\n Parameters\n ----------\n out_fn : str\n Output filename. .geo or .txt extension recommended\n outline : tuple of lists\n Each list is an edge on the mesh. 
Must be in order, and the first element of each list is shared with the previous edge. i.e. ([1,2],[3,4]) make edges 1-2-3 and 3-4\n outline_lc : float or tuple\n Target mesh size for the outline. This can be an array if variable sizing is desired per edge, or a tuple of lists matching the outline in length.\n cuts : tuple of lists\n Format is the same as for outline, but each list will be looped to itself then removed from the original mesh.\n cuts_lc : tuple of lists\n Target mesh size for cuts.\n points : list\n Points at which to target mesh size\n points_lc : list or float\n Target mesh size for those points\n \"\"\"\n\n if not (type(outline) == tuple or type(outline) == list):\n raise TypeError('Outline must be a tuple or list of lists')\n if cuts is not None:\n if not (type(outline) == tuple or type(outline) == list):\n raise TypeError('cuts must be a tuple or list of lists')\n if cuts_lc is None:\n raise TypeError('Target mesh size must be provided if cuts are')\n cut_lc = get_lc(cuts_lc)\n if points is not None:\n if points_lc is None:\n raise TypeError('Target mesh size must be provided if points are')\n if not (type(points) == tuple or type(points) == list):\n raise TypeError('points must be a tuple of lists')\n point_lc = get_lc(points_lc)\n # initialize a bunch of counters\n node_num = 1\n line_num = 1\n phys_num = 1\n loop_num = 1\n\n # make a nice wrappers for the target mesh\n out_lc = get_lc(outline_lc)\n\n # Get the file ready\n fid = open(out_fn, 'w')\n fid.write('lc={:4.2f};\\n'.format(out_lc.max))\n\n # Define the format for points\n formatspec = 'p{:d}=newp; Point(p{:d})={{{:4.3f}, {:4.3f}, 0.0, {:4.3f}}};\\n'\n\n for i, edge in enumerate(outline):\n for j, point in enumerate(edge):\n fid.write(\n formatspec.format(node_num + j, node_num + j, point[0], point[1], out_lc(i, j)))\n node_num += len(edge)\n\n temp_node_num = 1\n\n if True: # spline_or_line == 'Spline':\n\n # make splines for edge\n for i, edge in enumerate(outline):\n fid.write('s{:d}=newreg; {:s}(s{:d})={{'.format(line_num, spline_or_line, line_num))\n for j, point in enumerate(edge):\n fid.write('p{:d},'.format(temp_node_num + j))\n temp_node_num += len(edge)\n if not i == len(outline) - 1:\n fid.write('p{:d}}};\\n'.format(temp_node_num))\n else:\n fid.write('p{:d}}};\\n'.format(1))\n\n line_num += 1\n\n # else:\n # make lines for edge\n # for i, edge in enumerate(outline):\n # for j in range(len(edge) - 1):\n # fid.write('s{:d}=newreg; {:s}(s{:d})={{p{:d}, p{:d}}};\\n'.format(line_num, spline_or_line, line_num, temp_node_num + j, temp_node_num + j + 1))\n # line_num += 1\n\n # temp_node_num += len(edge)\n # if not i == len(outline) - 1:\n # fid.write('s{:d}=newreg; {:s}(s{:d})={{p{:d}, p{:d}}};\\n'.format(line_num, spline_or_line, line_num, temp_node_num - 1, temp_node_num))\n # else:\n # fid.write('s{:d}=newreg; {:s}(s{:d})={{p{:d}, p{:d}}};\\n'.format(line_num, spline_or_line, line_num, temp_node_num - 1, 1))\n # line_num += 1\n\n # make a physical line of the splines\n for i in range(line_num - 1):\n fid.write(\n 'pl{:d}=newreg; Physical Line(pl{:d})={{s{:d}}};\\n'.format(phys_num, phys_num, i + 1))\n phys_num += 1\n\n # Make a line loop of these\n fid.write('ll{:d}=newreg; Line Loop(ll{:d})={{'.format(loop_num, loop_num))\n loop_num += 1\n\n for i in range(line_num - 2):\n fid.write('s{:d},'.format(i + 1))\n fid.write('s{:d}}};\\n'.format(line_num - 1))\n\n # Now do the cuts\n if cuts is not None:\n for i, cut in enumerate(cuts):\n\n # Do the points\n for j, point in enumerate(cut):\n fid.write(\n 
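# each cut point below is emitted as one gmsh .geo statement of the form\n # pN=newp; Point(pN)={x, y, 0.0, lc}; (this is what formatspec expands to)\n 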
formatspec.format(node_num + j, node_num + j, point[0], point[1], cut_lc(i, j)))\n\n # save the node and number at the start of the cut\n temp_node_num = node_num\n # increment\n node_num += len(cut)\n\n # And now the splines\n fid.write('s{:d}=newreg; Spline(s{:d})={{p{:d}'.format(\n line_num, line_num, temp_node_num))\n for j in range(temp_node_num + 1, node_num):\n fid.write(',p{:d}'.format(j))\n fid.write(',p{:d}}};\\n'.format(temp_node_num))\n\n # Now make these line loops\n fid.write('ll{:d}=newreg; Line Loop(ll{:d})={{s{:d}}};\\n'.format(\n loop_num, loop_num, line_num))\n\n # And now the physical lines\n fid.write('pl{:d}=newreg; Physical Line(pl{:d})={{s{:d}}};\\n'.format(\n phys_num, phys_num, line_num))\n\n # Increment\n line_num += 1\n loop_num += 1\n phys_num += 1\n\n if points:\n # Specify the points for the target mesh size\n temp_node_num = node_num\n for i, group in enumerate(points):\n for j, point in enumerate(group):\n fid.write(\n formatspec.format(node_num + j, node_num + j, point[0], point[1], point_lc(i, j)))\n node_num += len(group)\n\n # Now form the surface\n # Start with the outline\n fid.write('ps1=newreg; Plane Surface(ps1)={ll1')\n # Add the cuts, backwards to exclude\n for cut_num in range(1, loop_num - 1):\n fid.write(',-ll{:d}'.format(cut_num + 1))\n # Close it\n fid.write('};\\n')\n\n if points is not None:\n # Put the target mesh points in the surface\n for pt_num in range(temp_node_num, node_num - 1):\n fid.write('Point{{p{:d}}} In Surface{{ps1}};\\n'.format(pt_num + 1))\n\n # Make the surface physical\n fid.write('ps2=newreg; Physical Surface(ps2)={ps1};\\n')\n\n # close the file\n fid.close()\n return None\n\n\ndef gmsh_outline_old(out_fn, outline, outlc, dividers, cuts, cutslc=None, points=None, pointslc=None):\n \"\"\" Turn an outline into a .geo type file for gmsh\n\n Parameters\n ----------\n out_fn : str\n Output filename. .geo or .txt extension recommended\n outline : tuple of lists\n Each list is an edge on the mesh. Must be in order, and the first element of each list is shared with the previous edge. i.e. ([1,2],[3,4]) make edges 1-2-3 and 3-4\n outline_lc : float, tuple of lists, or list\n Target mesh size for the outline. 
This can be an array if variable sizing is desired per edge, or a tuple of lists matching the outline in length.\n cuts : tuple of lists\n Format is the same as for outline, but each list will be looped to itself then removed from the original mesh.\n cuts_lc : float, tuple of lists, or list\n Target mesh size for cuts.\n points : list\n Points at which to target mesh size\n points_lc : list or float\n Target mesh size for those points\n \"\"\"\n\n fid = open(out_fn, 'w')\n fid.write('lc={:4.2f};\\n'.format(outlc.min()))\n formatspec = 'p{:d}=newp; Point(p{:d})={{{:4.3f}, {:4.3f}, 0.0, {:4.3f}}};\\n'\n for i in range(0, len(outline)):\n fid.write(\n formatspec.format(i + 1, i + 1, outline[i, 0], outline[i, 1], outlc[i, 0]))\n pl = 1\n lx = len(outline)\n lv = len(dividers)\n for k in range(2, lv + 1):\n fid.write('s{:d}=newreg; Spline(s{:d})={{p{:d}'.format(k, k, pl))\n for j in range(0, dividers[k - 2]):\n fid.write(',p{:d}'.format(pl + j + 1))\n fid.write('};\\n')\n fid.write(\n 'pl{:d}=newreg; Physical Line(pl{:d})={{s{:d}}};\\n'.format(k - 1, k - 1, k))\n pl = pl + dividers[k - 2]\n # fencepost\n fid.write('s{:d}=newreg; Spline(s{:d})={{p{:d}'.format(lv + 1, lv + 1, pl))\n for j in range(0, dividers[lv - 1]):\n fid.write(',p{:d}'.format(pl + j + 1))\n fid.write(',p{:d}'.format(1))\n fid.write('};\\n')\n fid.write(\n 'pl{:d}=newreg; Physical Line(pl{:d})={{s{:d}}};\\n'.format(lv, lv, lv + 1))\n pl = pl + dividers[k - 2]\n # lineloop\n fid.write('ll1=newreg; Line Loop(ll1)={s2')\n for j in range(3, lv + 2):\n fid.write(',s{:d}'.format(j))\n fid.write('};\\n')\n\n # do the stuff for the cuts\n ass = np.where(cuts[:, 0] == -2e9)\n a = np.r_[-1, ass[0], len(cuts)]\n dim = len(a)\n jcount = lx + 1\n n = 0\n for i in range(1, dim):\n for j in range(jcount, jcount + a[i] - a[i - 1] - 1):\n fid.write(\n formatspec.format(j, j, cuts[n, 0], cuts[n, 1], cutslc[n, 0]))\n n = n + 1\n for j in range(jcount, jcount + a[i] - a[i - 1] - 2):\n fid.write(\n 'l{:d}=newreg; Line(l{:d})={{p{:d},p{:d}}};\\n'.format(j, j, j, j + 1))\n fid.write('l{:d}=newreg; Line(l{:d})={{p{:d},p{:d}}};\\n'.format(\n jcount + a[i] - a[i - 1] - 2, jcount + a[i] - a[i - 1] - 2, jcount + a[i] - a[i - 1] - 2, jcount))\n fid.write('s{:d}=newreg; Spline(s{:d})={{p{:d}'.format(\n i + 1 + lv, i + 1 + lv, jcount))\n for j in range(jcount + 1, jcount + a[i] - a[i - 1] - 1):\n fid.write(',p{:d}'.format(j))\n fid.write(',p{:d}}};\\n'.format(jcount))\n fid.write('ll{:d}=newreg; Line Loop(ll{:d})={{s{:d}}};\\n'.format(\n 1 + i, 1 + i, i + lv + 1))\n fid.write('pl{:d}=newreg; Physical Line(pl{:d})={{s{:d}}};\\n'.format(\n i + lv, i + lv, i + lv + 1))\n jcount = jcount + a[i] - a[i - 1]\n n = n + 1\n # do the stuff for the target mesh size\n for i in range(0, len(points)):\n fid.write(formatspec.format(\n jcount + i, jcount + i, points[i, 0], points[i, 1], pointslc[i, 0]))\n fid.write('ps1=newreg; Plane Surface(ps1)={ll1')\n for i in range(0, dim - 1):\n fid.write(',-ll{:d}'.format(i + 2))\n fid.write('};\\n')\n for i in range(0, len(points)):\n fid.write('Point{{p{:d}}} In Surface{{ps1}};\\n'.format(jcount + i))\n fid.write('ps2=newreg; Physical Surface(ps2)={ps1};\\n')\n fid.close()\n return\n\n\ndef makemesh(big=10000, small=1000, layers=3, partitions=2, output='../elmer/Rough/nodes', gl=1, glp=0.1):\n import shp2xy\n import gmsh_outline_shp\n # give the node ranges for the different boundary types of the outline\n edges = [17, 4, 53, 4, 0] # for 5 cuts [3, 4, 53, 4, 0]\n\n # ster=shp2xy('../qgis/outlines/sterp')\n ster = 
shp2xy('../qgis/outlines/newoutline20140707')\n sterlc = np.empty([len(ster), 1])\n for i in range(0, len(ster)):\n sterlc[i, 0] = big\n cut1 = shp2xy('../qgis/outlines/cut1p')\n cut2 = shp2xy('../qgis/outlines/cut2p')\n cut3 = shp2xy('../qgis/outlines/cut6p')\n # cut4=shp2xy('../qgis/outlines/cut4p')\n # cut5=shp2xy('../qgis/outlines/cut5p')\n div = np.empty([1, 2])\n for i in range(0, 2):\n div[0, i] = -2e9\n cuts = np.r_[cut1, div, cut2, div, cut3] # ,div,cut4] #,div,cut5]\n\n cutslc = np.empty([len(cuts), 1])\n for i in range(0, len(cuts)):\n cutslc[i, 0] = big\n\n bounds = shp2xy('../qgis/outlines/bounditp')\n ground = shp2xy('../qgis/outlines/sterglp')\n blc = np.empty([len(bounds), 1])\n glc = np.empty([len(ground), 1])\n for i in range(0, len(bounds)):\n blc[i, 0] = big\n for i in range(0, len(ground)):\n glc[i, 0] = small\n points = np.r_[ground, bounds]\n pointslc = np.r_[glc, blc]\n gmsh_outline_shp(output, ster, sterlc, edges,\n cuts, cutslc, points, pointslc)\n os.system('gmsh ' + output + '.txt -o ' + output + '.msh -1 -2')\n os.system('elmergrid 14 2 ' + output + '.msh -autoclean')\n os.system('extrudemesh ' + output + ' ' + output + '3 ' + str(layers) +\n ' 1 1 0 ' + str(gl) + ' ' + str(glp) + ' 0 ../Meshgen/DEM 1000 3 -99999 1')\n os.system('extrudemesh ' + output + ' ' + output + 'ben ' + str(layers) +\n ' 1 1 0 ' + str(gl) + ' ' + str(glp) + ' 0 ../Meshgen/allbenDEM 1000 3 -99999 1')\n direc, s, msh = output.rpartition('/')\n os.chdir(direc)\n os.system('elmergrid 2 4 ' + msh + 'ben')\n getbot(fname=msh, layers=layers)\n return\n\n\ndef getbot(fname='nodes', layers=3):\n fid1 = open(fname)\n fid1.readline()\n fid1.readline()\n fid1.readline()\n fid1.readline()\n nodes = int(fid1.readline())\n nums = np.empty([nodes, 1])\n data = np.empty([nodes, 3])\n for i in range(0, nodes):\n line = fid1.readline()\n a, c, line = line.partition(' ')\n nums[i, 0] = int(a)\n for j in range(0, 3):\n b, c, line = line.partition(' ')\n data[i, j] = float(b)\n # direc,s,msh=fname.rpartition('/')\n fid2 = open(fname + 'bot.dat', 'w')\n bots = nodes / layers\n fid2.write('%d\\n' % bots)\n for i in range(0, nodes // layers):\n fid2.write('{:d} {:f} {:f} {:f}\\n'.format(\n i + 1, data[i, 0], data[i, 1], data[i, 2]))\n\n\ndef get_lc(lcs):\n \"\"\" Convenient way to deal with multiple input options. Returns appropriate object.\n\n Parameters\n ----------\n lcs : float, list, or tuple of arrays\n if float, all identical spacing. If list, should match number of edges. If tuple, each array should match the edge in size.\n \"\"\"\n if type(lcs) == float:\n return LC_const(lcs)\n if type(lcs) == list:\n return LC_list(lcs)\n if type(lcs) == tuple:\n return LC_tuple(lcs)\n\n\nclass LC:\n \"\"\"Base for target mesh size class\n\n Paramters\n ---------\n lcs: float, list, or tuple of arrays\n if float, all identical spacing. If list, should match number of edges. 
If tuple, each array should match the edge in size.\n\n Attributes\n ----------\n max : float\n The maximum value of the LCs.\n\n \"\"\"\n\n def __init__(self, lcs):\n pass\n\n def __call__(self, edge, node):\n \"\"\" Return the target size\n\n Parameters\n ----------\n edge : int\n The edge (or cut) number\n node : int\n The node (or point) number\n \"\"\"\n\n pass\n\n\nclass LC_const(LC):\n\n def __init__(self, lcs):\n self.lc = lcs\n self.max = self.lc\n\n def __call__(self, i, j):\n return self.lc\n\n\nclass LC_list(LC):\n\n def __init__(self, lcs):\n self.lc = lcs\n self.max = max(self.lc)\n\n def __call__(self, i, j):\n return self.lc[i]\n\n\nclass LC_tuple(LC):\n\n def __init__(self, lcs):\n self.lc = lcs\n self.max = max([max(l) for l in self.lc])\n\n def __call__(self, i, j):\n return self.lc[i][j]\n","sub_path":"modeltools/lib/gmshlib.py","file_name":"gmshlib.py","file_ext":"py","file_size_in_byte":20200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"52717636","text":"# USAGE\n# python yolo_video.py --input videos/airport.mp4 --output output/airport_output.avi --yolo yolo-coco\n\n# import the necessary packages\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\nimport os\n# Writing to an excel \n# sheet using Python \nimport xlwt \nfrom xlwt import Workbook \nfrom datetime import datetime\nimport random\n#for training time series \nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import LSTM,Dense\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nfrom keras.layers import Dropout\nfrom utils import *\n# import datetime\nfrom sklearn.model_selection import train_test_split\nfrom datetime import datetime\n\nlook_back = 1\nforward_days = 2\nnum_periods = 1\nuse_dropout = False\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--input\", required=True,\n\thelp=\"path to input video\")\nap.add_argument(\"-o\", \"--output\", required=False,\n\thelp=\"path to output video\")\nap.add_argument(\"-y\", \"--yolo\", required=True,\n\thelp=\"base path to YOLO directory\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\thelp=\"minimum probability to filter weak detections\")\nap.add_argument(\"-t\", \"--threshold\", type=float, default=0.3,\n\thelp=\"threshold when applyong non-maxima suppression\")\nargs = vars(ap.parse_args())\n\n# load the COCO class labels our YOLO model was trained on\nlabelsPath = os.path.sep.join([args[\"yolo\"], \"coco.names\"])\nLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\n# Workbook is created \nwb = Workbook() \n\n# add_sheet is used to create sheet. 
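\n# Note: xlwt writes only the legacy binary .xls format, so the workbook saved below as 'train_bounding_boxes.xlsx' is really an .xls file behind a misleading extension; xlrd can still open it, but readers that expect genuine xlsx (e.g. openpyxl) will refuse it.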
\nsheet1 = wb.add_sheet('Sheet 1') \nsheet1.write(0, 0, 'date') \nsheet1.write(0, 1, 'x1') \nsheet1.write(0, 2, 'x2') \nsheet1.write(0, 3, 'y1')\nsheet1.write(0, 4, 'y2') \n\n# initialize a list of colors to represent each possible class label\nnp.random.seed(42)\nCOLORS = np.random.randint(0, 255, size=(len(LABELS), 3),\n\tdtype=\"uint8\")\n\n# derive the paths to the YOLO weights and model configuration\nweightsPath = os.path.sep.join([args[\"yolo\"], \"yolov3.weights\"])\nconfigPath = os.path.sep.join([args[\"yolo\"], \"yolov3.cfg\"])\n\n# load our YOLO object detector trained on COCO dataset (80 classes)\n# and determine only the *output* layer names that we need from YOLO\nprint(\"[INFO] loading YOLO from disk...\")\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\nln = net.getLayerNames()\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n# initialize the video stream, pointer to output video file, and\n# frame dimensions\nvs = cv2.VideoCapture(args[\"input\"])\nwriter = None\n(W, H) = (None, None)\n\n# try to determine the total number of frames in the video file\ntry:\n\tprop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \\\n\t\telse cv2.CAP_PROP_FRAME_COUNT\n\ttotal = int(vs.get(prop))\n\tprint(\"[INFO] {} total frames in video\".format(total))\n\n# an error occurred while trying to determine the total\n# number of frames in the video file\nexcept:\n\tprint(\"[INFO] could not determine # of frames in video\")\n\tprint(\"[INFO] no approx. completion time can be provided\")\n\ttotal = -1\n\n# loop over frames from the video file stream\ncounter = 0\nm = 0\nwhile counter < (0.8*total):\n\tcounter += 1\n\tprint(counter)\n\t# read the next frame from the file\n\t(grabbed, frame) = vs.read()\n\n\t# if the frame was not grabbed, then we have reached the end\n\t# of the stream\n\tif not grabbed:\n\t\tbreak\n\n\t# if the frame dimensions are empty, grab them\n\tif W is None or H is None:\n\t\t(H, W) = frame.shape[:2]\n\n\t# construct a blob from the input frame and then perform a forward\n\t# pass of the YOLO object detector, giving us our bounding boxes\n\t# and associated probabilities\n\tblob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),\n\t\tswapRB=True, crop=False)\n\tnet.setInput(blob)\n\tstart = time.time()\n\tlayerOutputs = net.forward(ln)\n\tend = time.time()\n\n\t# initialize our lists of detected bounding boxes, confidences,\n\t# and class IDs, respectively\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\n\t# loop over each of the layer outputs\n\n\tfor output in layerOutputs:\n\t\t# loop over each of the detections\n\t\tfor detection in output:\t\t\n\t\t\t# extract the class ID and confidence (i.e., probability)\n\t\t\t# of the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > args[\"confidence\"]:\n\t\t\t\tm = m + 1\n\t\t\t\t# scale the bounding box coordinates back relative to\n\t\t\t\t# the size of the image, keeping in mind that YOLO\n\t\t\t\t# actually returns the center (x, y)-coordinates of\n\t\t\t\t# the bounding box followed by the boxes' width and\n\t\t\t\t# height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top\n\t\t\t\t# and and left corner of the bounding box\n\t\t\t\tx 
= int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# save into xls file\n\t\t\t\tnow = datetime.now()\n\t\t\t\tdt_string = now.strftime(\"%H:%M:%S\")\n\t\t\t\tsheet1.write(m, 0, dt_string) \n\t\t\t\tsheet1.write(m, 1, str(x)) \n\t\t\t\tsheet1.write(m, 2, str(x+width)) \n\t\t\t\tsheet1.write(m, 3, str(y)) \n\t\t\t\tsheet1.write(m, 4, str(y+height)) \n\n\t\t\t\t# update our list of bounding box coordinates,\n\t\t\t\t# confidences, and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\twb.save('train_bounding_boxes.xlsx')\n\t# apply non-maxima suppression to suppress weak, overlapping\n\t# bounding boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, args[\"confidence\"],\n\t\targs[\"threshold\"])\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\t# draw a bounding box rectangle and label on the frame\n\t\t\tcolor = [int(c) for c in COLORS[classIDs[i]]]\n\t\t\t# print(f1 , f2 , f3)\n\t\t\tf = int(w/3.5)\n\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n\t\t\t# text = \"{}: {:.4f}\".format(LABELS[classIDs[i]],\n\t\t\t# \tconfidences[i])\n\t\t\t# cv2.putText(frame, text, (x, y - 5),\n\t\t\t# \tcv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\n\t# check if the video writer is None\n\tif writer is None:\n\t\t# initialize our video writer\n\t\tfourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n\t\twriter = cv2.VideoWriter('output/train.avi', fourcc, 30,\n\t\t\t(frame.shape[1], frame.shape[0]), True)\n\n\t\t# some information on processing single frame\n\t\tif total > 0:\n\t\t\telap = (end - start)\n\t\t\tprint(\"[INFO] single frame took {:.4f} seconds\".format(elap))\n\t\t\tprint(\"[INFO] estimated total time to finish: {:.4f}\".format(\n\t\t\t\telap * total))\n\n\t# write the output frame to disk\n\twriter.write(frame)\n\n#preprocessing for time series prediction\n\ndf = load_ds('train_bounding_boxes.xlsx')\narray = df.values.reshape(df.shape[0],4)\n\n\n#split in Train and Test\n\ndivision = len(array) - num_periods*forward_days\n# array_test = array[division-look_back:]\narray_train = array[:division]\n\ndef processData(data, look_back, forward_days,jump=1):\n X,Y = [],[]\n for i in range(0,len(data) -look_back -forward_days +1, jump):\n X.append(data[i:(i+look_back)])\n Y.append(data[(i+look_back):(i+look_back+forward_days)])\n return np.array(X),np.array(Y)\n\n# X_test,y_test = processData(array_test,look_back,forward_days,forward_days)\n# y_test = np.array([list(a.ravel()) for a in y_test])\n\nX,y = processData(array_train,look_back,forward_days)\ny = np.array([list(a.ravel()) for a in y])\nX_train, X_validate, y_train, y_validate = train_test_split(X, y, test_size=0.20, random_state=42)\n\nNUM_NEURONS_FirstLayer = 64\nNUM_NEURONS_SecondLayer = 32\nEPOCHS = 50\n\n#Build the model\nmodel = Sequential()\nmodel.add(LSTM(NUM_NEURONS_FirstLayer,input_shape=(look_back,4), return_sequences=True))\nmodel.add(LSTM(NUM_NEURONS_SecondLayer,input_shape=(NUM_NEURONS_FirstLayer,4)))\nif use_dropout:\n model.add(Dropout(0.25))\nmodel.add(Dense(4*forward_days))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nmodel.summary()\nhistory = model.fit(X_train,y_train,epochs=EPOCHS,validation_data=(X_validate,y_validate),shuffle=False,batch_size=2, 
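\n# with look_back=1 and forward_days=2, each training sample maps a single bounding box (4 features) to the next two boxes, flattened into the 8 outputs of Dense(4*forward_days)\n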
verbose=2)\nfile_name = './LSTM_compA2_LB{}_FD{}_E{}_F{}_S{}.h5'.format(look_back, forward_days, EPOCHS, NUM_NEURONS_FirstLayer, NUM_NEURONS_SecondLayer)\nmodel.save(file_name)\nprint(\"Saved model `{}` to disk\".format(file_name))\n################################\nwriter = None\n\nwhile True :\n\t# read the next frame from the file\n\t(grabbed, frame) = vs.read()\n\n\t# if the frame was not grabbed, then we have reached the end\n\t# of the stream\n\tif not grabbed:\n\t\tbreak\n\n\t# if the frame dimensions are empty, grab them\n\tif W is None or H is None:\n\t\t(H, W) = frame.shape[:2]\n\n\t# construct a blob from the input frame and then perform a forward\n\t# pass of the YOLO object detector, giving us our bounding boxes\n\t# and associated probabilities\n\tblob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),\n\t\tswapRB=True, crop=False)\n\tnet.setInput(blob)\n\tstart = time.time()\n\tlayerOutputs = net.forward(ln)\n\tend = time.time()\n\n\t# initialize our lists of detected bounding boxes, confidences,\n\t# and class IDs, respectively\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\n\t# loop over each of the layer outputs\n\tfor output in layerOutputs:\n\t\t# loop over each of the detections\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability)\n\t\t\t# of the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > args[\"confidence\"]:\n\t\t\t\t# scale the bounding box coordinates back relative to\n\t\t\t\t# the size of the image, keeping in mind that YOLO\n\t\t\t\t# actually returns the center (x, y)-coordinates of\n\t\t\t\t# the bounding box followed by the boxes' width and\n\t\t\t\t# height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top\n\t\t\t\t# and and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates,\n\t\t\t\t# confidences, and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping\n\t# bounding boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, args[\"confidence\"],\n\t\targs[\"threshold\"])\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\t# draw a bounding box rectangle and label on the frame\n\t\t\tcolor = [int(c) for c in COLORS[classIDs[i]]]\n\t\t\tX_test = np.array([[[x,(x+w),y,(y+h)]]])\n\t\t\tXt = model.predict(X_test)\n\t\t\tprint(X_test)\n\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n\t\t\tcv2.rectangle(frame, (Xt[0][0], Xt[0][2]), (Xt[0][1], Xt[0][3]), color, 2)\n\t\t\tcv2.rectangle(frame, (Xt[0][4], Xt[0][6]), (Xt[0][5], Xt[0][7]), color, 2)\n\t\t\t# cv2.circle(frame,(x+w/2,y+h/2), 3, (0,0,0), -1)\n\t\t\t# text = \"{}: {:.4f}\".format(LABELS[classIDs[i]],\n\t\t\t# \tconfidences[i])\n\t\t\t# cv2.putText(frame, text, (x, y - 5),\n\t\t\t# 
\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\n\t# check if the video writer is None\n\tif writer is None:\n\t\t# initialize our video writer\n\t\tfourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n\t\twriter = cv2.VideoWriter('output/test.avi', fourcc, 30,\n\t\t\t(frame.shape[1], frame.shape[0]), True)\n\n\t\t# some information on processing single frame\n\t\tif total > 0:\n\t\t\telap = (end - start)\n\t\t\tprint(\"[INFO] single frame took {:.4f} seconds\".format(elap))\n\t\t\tprint(\"[INFO] estimated total time to finish: {:.4f}\".format(\n\t\t\t\telap * total))\n\n\t# write the output frame to disk\n\twriter.write(frame)\n################################3\n\n\n# release the file pointers\nprint(\"[INFO] cleaning up...\")\nwriter.release()\nvs.release()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"124196121","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport cv2\nimport time\nimport zmq\nfrom numpy.core.multiarray import ndarray\nfrom pygame import mixer\n\nfrom System.Data.CONSTANTS import *\nfrom System.Controller.JsonEncoder import JsonEncoder\n\nclass WorkerThread(QObject):\n receive = pyqtSignal(dict)\n\n def __init__(self):\n super().__init__()\n context = zmq.Context()\n self.socket = context.socket(zmq.REP)\n self.socket.bind(\"tcp://\"+GUIIP+\":\"+str(GUIPORT))\n\n @pyqtSlot()\n def run(self):\n while True:\n message = self.socket.recv_pyobj() # receive a message json\n self.socket.send_pyobj(\"\")\n self.receive.emit(message)\n\n\nclass SearchForm(QWidget):\n def __init__(self, port=GUIPORT, ip=GUIIP):\n super().__init__()\n\n self.encoder = JsonEncoder()\n\n self.setWindowIcon(QIcon('UI\\\\icon.png'))\n self.setStyleSheet(\"background-color: #D3D3D3;\")\n self.setWindowTitle('Argus - User Interface')\n # self.setStyleSheet(open('style.css').read())\n\n # self.setGeometry(330, 150, 731, 438)\n self.setGeometry(350, 50, 760, 850)\n # self.setWindowFlags(Qt.FramelessWindowHint)\n\n oImage = QImage(\"UI\\\\Untitled.png\")\n sImage = oImage.scaled(QSize(300, 200)) # resize Image to widgets size\n palette = QPalette()\n palette.setBrush(QPalette.Window, QBrush(sImage))\n self.setPalette(palette)\n\n self.worker = WorkerThread()\n self.workerThread = QThread()\n self.workerThread.started.connect(self.worker.run) # Init worker run() at startup (optional)\n self.worker.receive.connect(self.decode) # Connect your signals/slots\n self.worker.moveToThread(self.workerThread) # Move the Worker object to the Thread object\n self.workerThread.start()\n\n self.make_lable('Date', 60, 0, 61, 41, True, 12)\n self.make_lable('From', 10, 40, 41, 21, True, 10)\n self.make_lable('To', 10, 70, 41, 21, True, 10)\n\n self.make_lable('Time', 250, 0, 61, 41, True, 12)\n self.make_lable('From', 190, 40, 41, 21, True, 10)\n self.make_lable('To', 190, 70, 41, 21, True, 10)\n\n self.make_lable('Location', 430, 0, 71, 41, True, 12)\n self.make_lable('City', 370, 40, 41, 21, True, 10)\n self.make_lable('District', 370, 70, 50, 21, True, 10)\n\n self.startDate = QDateEdit(self)\n self.startDate.move(50, 40)\n self.startDate.resize(110, 22)\n self.startDate.setDate(QDate.fromString('01/01/2015', \"dd/MM/yyyy\"))\n self.endDate = QDateEdit(self)\n self.endDate.move(50, 70)\n self.endDate.resize(110, 22)\n self.endDate.setDate(QDate.fromString('01/01/2015', \"dd/MM/yyyy\"))\n\n self.startTime = QTimeEdit(self)\n 
self.startTime.setDisplayFormat('hh:mm')\n self.startTime.move(230, 40)\n self.startTime.resize(110, 22)\n self.endTime = QTimeEdit(self)\n self.endTime.setDisplayFormat('hh:mm')\n self.endTime.move(230, 70)\n self.endTime.resize(110, 22)\n\n self.city = QLineEdit(self)\n self.city.move(430, 40)\n self.city.resize(110, 22)\n\n self.loc = QLineEdit(self)\n self.loc.move(430, 70)\n self.loc.resize(110, 22)\n\n search = QPushButton(self)\n search.setText('Search')\n search.move(560, 10)\n search.resize(71, 51)\n search.clicked.connect(self.searchClicked)\n\n recent = QPushButton(self)\n recent.setText('Recent')\n recent.move(560, 70)\n recent.resize(71, 25)\n recent.clicked.connect(self.recentlyClicked)\n\n self.results = QListWidget(self)\n self.results.move(20, 115)\n self.results.resize(720, 720)\n self.results.itemDoubleClicked.connect(self.listwidgetClicked)\n self.results.setStyleSheet(\"background-color: #C0C0C0;\")\n\n widgetText = QLabel(self)\n widgetText.move(650, 15)\n img = QImage(\"UI\\\\logo.png\")\n img.convertToFormat(QImage.Format_ARGB32)\n pixmap = QPixmap(img)\n pixmap = pixmap.scaled(124, 84, Qt.KeepAspectRatio)\n widgetText.setPixmap(pixmap)\n\n # self.appendToList(list=True)\n # self.appendToList(list=False)\n print(\"hello\")\n self.recentlyClicked()\n\n def listwidgetClicked(self, item):\n item = self.results.itemWidget(item)\n info = item.children()[-1]\n startFrameID, cameraID = info.text().split(',')\n print(startFrameID, cameraID)\n self.encoder.requestVideo(camera_id=int(cameraID), starting_frame_id=int(startFrameID))\n return\n\n\n def searchClicked(self):\n city = None\n district = None\n\n if self.city.text() != '':\n city = self.city.text()\n if self.loc.text() != '':\n district = self.loc.text()\n\n self.encoder.requestData(self.startDate.text(), self.endDate.text(), self.startTime.text(), self.endTime.text(),\n city, district)\n\n def recentlyClicked(self):\n self.encoder.getRecentCrashes()\n\n\n def appendToList(self, ID=3, Image=None, Date='a', Time='d', City='f', Location='g', startFrame=1, list=True):\n itemN = QListWidgetItem()\n widget = QWidget()\n\n widgetText = QLabel()\n if not isinstance(Image,ndarray) :\n img = cv2.imread('UI\\\\notfound.png')\n else:\n img = Image\n img = cv2.resize(img, (120, 100), interpolation=cv2.INTER_AREA)\n height, width, channel = img.shape\n bytesPerLine = 3 * width\n qImg = QImage(img.data, width, height, bytesPerLine, QImage.Format_BGR888)\n pixmap = QPixmap(qImg)\n widgetText.setPixmap(pixmap)\n widgetText.resize(20, 20)\n\n startFrameID = QLabel()\n startFrameID.setText(str(startFrame)+','+str(ID))\n startFrameID.hide()\n print(startFrameID.text())\n\n info = QLabel()\n font = QFont('SansSerif', 10)\n font.setBold(True)\n info.setFont(font)\n info.setText(' Camera Id: ' + str(ID) + ' Date: ' + str(Date) + ' City: ' + str(City)\n + ' Location: ' + str(Location))\n widgetLayout = QHBoxLayout()\n widgetLayout.addWidget(widgetText)\n widgetLayout.addWidget(info)\n widgetLayout.addWidget(startFrameID)\n widgetLayout.addStretch()\n widgetLayout.setSizeConstraint(QLayout.SetFixedSize)\n widget.setLayout(widgetLayout)\n widget.setStyleSheet(\"background-color: none;\")\n # widget.paintEvent()\n itemN.setSizeHint(widget.sizeHint())\n if list:\n self.results.addItem(itemN)\n self.results.setItemWidget(itemN, widget)\n\n else:\n mixer.init()\n mixer.music.load('UI\\\\siren.mp3')\n mixer.music.play()\n\n itemN.setBackground(QColor('#7fc97f'))\n self.results.insertItem(0, itemN)\n self.results.setItemWidget(itemN, 
widget)\n\n\n\n def make_lable(self, text, x, y, width, height, bold=False, font=12):\n label = QLabel(self)\n label.setText(text)\n label.move(x, y)\n label.resize(width, height)\n font = QFont('SansSerif', font)\n if bold:\n font.setBold(True)\n label.setFont(font)\n # label.setStyleSheet(open('style.css').read())\n\n return label\n\n def playVideo(self, video):\n for i in range(len(video)):\n cv2.imshow('Frame', video[i])\n if cv2.waitKey(31) & 0xFF == ord('q'):\n break\n cv2.destroyAllWindows()\n\n\n\n\n\n def decode(self, msg):\n func = msg[FUNCTION]\n\n if func == REP_QUERY:\n self.results.clear()\n list = msg[LIST_OF_CRASHES]\n for item in list:\n self.appendToList(ID=item[CAMERA_ID], Image=item[CRASH_PIC], Date=item[CRASH_TIME], Time=item[CRASH_TIME],\n City=item[CITY], Location=item[DISTRICT], startFrame=item[STARTING_FRAME_ID], list=True)\n return\n\n if func == NOTIFICATION:\n self.appendToList(ID=msg[CAMERA_ID], Image=msg[CRASH_PIC], Date=msg[CRASH_TIME], Time=msg[CRASH_TIME],\n City=msg[CITY], Location=msg[DISTRICT], startFrame=msg[STARTING_FRAME_ID], list=False)\n return\n\n if func == REP_VIDEO:\n self.playVideo(msg[FRAMES])\n return\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n app.setStyle('Fusion')\n form = SearchForm()\n form.show()\n sys.exit(app.exec_())","sub_path":"RunGui.py","file_name":"RunGui.py","file_ext":"py","file_size_in_byte":8651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"508755015","text":"import torch, numpy as np\nnn = torch.nn\nimport collections, numbers, functools, tree\nimport pytorch_lightning as pl\nimport os, gzip, json, glob\nfrom Bio.SVDSuperimposer import SVDSuperimposer\n\nfrom torch.utils.checkpoint import checkpoint_sequential, checkpoint\n# if torch.cuda.is_available():\n# import deepspeed\n# checkpoint = deepspeed.checkpointing.checkpoint\n# else:\n# checkpoint = lambda x:x\n\nclass VarianceScaling:\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \"\"\"\n def __init__(self, scale=1.0, mode='fan_in', distribution='truncated_normal'):\n op = {'fan_in':lambda x:x[0], 'fan_out':lambda x:x[1], 'fan_avg':lambda x:0.5*sum(x)}\n self.fn = op[mode]\n if scale < 0.0: raise ValueError('`scale` must be a positive float.')\n d = {\n 'normal': lambda c, sh, dt, dv: torch.randn(*sh, dtype=dt, device=dv)*((scale/c)**0.5), \n 'truncated_normal': lambda c, sh, dt, dv: (torch.sqrt(-2*torch.log(\n torch.rand(*sh, dtype=dt, device=dv)*(1-np.exp(-2)) + np.exp(-2))) * torch.cos(\n 2*np.pi*torch.rand(*sh, dtype=dt, device=dv)))*scale/c,# box-muller\n 'uniform': lambda c, sh, dt, dv: (torch.rand(*sh, dtype=dt, device=dv)-0.5)*2*((3.*scale/c)**0.5)\n }\n self.dist = d[distribution.lower()]\n self.shape2fan = [lambda sh:(1,1), lambda sh:(sh[0],sh[0]), lambda sh:(sh[0],sh[1])]\n\n def __call__(self, shape, dtype, device):\n if len(shape) < len(self.shape2fan):\n fan_in, fan_out = self.shape2fan[len(shape)](shape)\n else:\n fan_in, fan_out = shape[-2] * np.prod(shape[:-2]), shape[-1] * np.prod(shape[:-2])\n return self.dist(max(1.0, self.fn([fan_in, fan_out])), shape, dtype, device)\n\ndef glorot_uniform(*shape, dtype=torch.float, device=None):\n return VarianceScaling(scale=1.0, mode='fan_avg', distribution='uniform')(shape, dtype, device)\n\ndef glorot_normal(*shape, dtype=torch.float, device=None):\n return VarianceScaling(scale=1.0, mode='fan_avg', distribution='truncated_normal')(shape, dtype, 
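\n# fan_avg with scale 1.0 gives the Glorot (Xavier) initialisers above; the lecun_* and he_* factories below differ only in using fan_in and, for he_*, scale 2.0\n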
device)\n\ndef lecun_uniform(*shape, dtype=torch.float, device=None):\n return VarianceScaling(scale=1.0, mode='fan_in', distribution='uniform')(shape, dtype, device)\n\ndef lecun_normal(*shape, dtype=torch.float, device=None):\n return VarianceScaling(scale=1.0, mode='fan_in', distribution='truncated_normal')(shape, dtype, device)\n\ndef he_uniform(*shape, dtype=torch.float, device=None):\n return VarianceScaling(scale=2.0, mode='fan_in', distribution='uniform')(shape, dtype, device)\n\ndef he_normal(*shape, dtype=torch.float, device=None):\n return VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal')(shape, dtype, device)\n\ndef makeLinear(num_input: int,\n num_output: int,\n dtype: torch.dtype,\n device: torch.device,\n initializer: str = 'linear',\n use_bias: bool = True,\n bias_init: float = 0.,\n ):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \"\"\"\n init = {'linear': lambda *s,dtype=dtype: lecun_normal(*s, dtype=dtype, device=device), \n 'relu': lambda *s,dtype=dtype: he_normal(*s, dtype=dtype, device=device), \n 'zeros': lambda *s,dtype=dtype: torch.zeros(*s, dtype=dtype, device=device)}\n assert initializer in init\n \n lin = nn.Linear(num_input, num_output, bias=use_bias, device=device)\n lin.weight.data = init[initializer](num_output, num_input, dtype=dtype)\n \n if use_bias: lin.bias.data = lin.bias.data.to(dtype).fill_(bias_init) # .to() is out-of-place, so the result must be assigned back\n return lin\n\n\ndef mask_mean(mask, value, axis=None, drop_mask_channel=False, eps=1e-10):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Masked mean.\n \"\"\"\n if drop_mask_channel:\n mask = mask[..., 0]\n\n mask_shape = mask.shape\n value_shape = value.shape\n\n assert len(mask_shape) == len(value_shape)\n\n if isinstance(axis, numbers.Integral):\n axis = [axis]\n elif axis is None:\n axis = list(range(len(mask_shape)))\n assert isinstance(axis, collections.abc.Iterable), (\n 'axis needs to be either an iterable, integer or \"None\"')\n\n broadcast_factor = 1.\n for axis_ in axis:\n value_size = value_shape[axis_]\n mask_size = mask_shape[axis_]\n if mask_size == 1:\n broadcast_factor *= value_size\n else:\n assert mask_size == value_size\n\n return (mask * value).sum(axis) / (mask.sum(axis) * broadcast_factor + eps)\n\n\nclass Attention(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Multihead attention.\n \"\"\"\n def __init__(self, config, global_config, output_dim):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.output_dim = output_dim\n self.forward = self.init_parameters\n \n def init_parameters(self, q_data, m_data, bias, nonbatched_bias=None):\n dt = q_data.dtype\n\n key_dim = self.config.get('key_dim', int(q_data.size(-1)))\n val_dim = self.config.get('value_dim', int(m_data.size(-1)))\n N_head = self.config.num_head\n \n assert key_dim % N_head == 0\n assert val_dim % N_head == 0\n self.key_dim = key_dim // N_head\n val_dim = val_dim // N_head\n\n p = lambda d_dim, prj_dim:nn.Parameter(glorot_uniform(d_dim, N_head, prj_dim, dtype=dt, device=self.global_config.device))\n self.query_w = p(q_data.size(-1), self.key_dim)\n self.key_w = p(m_data.size(-1), self.key_dim)\n self.value_w = p(m_data.size(-1), val_dim)\n\n if self.config.gating:\n self.gating_w = nn.Parameter(torch.zeros(q_data.size(-1), N_head, val_dim, dtype=dt, 
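\n # the gate starts wide open: zero-initialised gating_w plus the all-ones gating_b below give sigmoid(1) ~ 0.73 for every gate value on the first forward pass\n 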
\n            self.gating_b = nn.Parameter(torch.ones(N_head, val_dim, dtype=dt, device=self.global_config.device))\n\n        init = torch.zeros if self.global_config.zero_init else glorot_uniform\n        self.output_w = nn.Parameter(init(N_head, val_dim, self.output_dim, dtype=dt, device=self.global_config.device))\n        self.output_b = nn.Parameter(torch.zeros(self.output_dim, dtype=dt, device=self.global_config.device))\n\n        self.forward = self.go\n        return self(q_data, m_data, bias, nonbatched_bias)\n\n    def go(self, q_data, m_data, bias, nonbatched_bias=None):\n        \"\"\"\n        Arguments:\n          q_data: A tensor of queries, shape [batch_size, N_queries, q_channels].\n          m_data: A tensor of memories from which the keys and values are\n            projected, shape [batch_size, N_keys, m_channels].\n          bias: A bias for the attention, shape [batch_size, N_queries, N_keys].\n          nonbatched_bias: Shared bias, shape [N_queries, N_keys].\n\n        Returns:\n          A float32 tensor of shape [batch_size, N_queries, output_dim].\n        \"\"\"\n        q = torch.einsum('bqa,ahc->bqhc', q_data, self.query_w) * self.key_dim**(-0.5)\n        k = torch.einsum('bka,ahc->bkhc', m_data, self.key_w)\n        v = torch.einsum('bka,ahc->bkhc', m_data, self.value_w)\n\n        logits = torch.einsum('bqhc,bkhc->bhqk', q, k) + bias\n        if nonbatched_bias is not None: logits += nonbatched_bias[None,...]\n\n        weights = torch.functional.F.softmax(logits, dim=-1)\n        weighted_avg = torch.einsum('bhqk,bkhc->bqhc', weights, v)\n\n        if self.config.gating:\n            gate_values = torch.einsum('bqc, chv->bqhv', q_data, self.gating_w)\n            gate_values = torch.sigmoid(gate_values + self.gating_b)\n            weighted_avg *= gate_values\n\n        output = torch.einsum('bqhc,hco->bqo', weighted_avg, self.output_w) + self.output_b\n        return output\n\n\nclass GlobalAttention(nn.Module):\n    \"\"\"\n    DeepMind AlphaFold code https://github.com/deepmind/alphafold\n    ported to pytorch by Louis Robinson (21 Aug 2021).\n    \n    Global attention.\n\n    Jumper et al. (2021) Suppl. Alg. 
19 \"MSAColumnGlobalAttention\" lines 2-7\n \"\"\"\n def __init__(self, config, global_config, output_dim):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.output_dim = output_dim\n self.forward = self.init_parameters\n \n def init_parameters(self, q_data, m_data, q_mask, bias):\n dt = q_data.dtype\n\n key_dim = self.config.get('key_dim', int(q_data.size(-1)))\n val_dim = self.config.get('value_dim', int(m_data.size(-1)))\n N_head = self.config.num_head\n \n assert key_dim % N_head == 0\n assert val_dim % N_head == 0\n self.key_dim = key_dim // N_head\n val_dim = val_dim // N_head\n\n self.query_w = nn.Parameter(glorot_uniform(q_data.size(-1), N_head, self.key_dim, dtype=dt, device=self.global_config.device))\n self.key_w = nn.Parameter(glorot_uniform(m_data.size(-1), self.key_dim, dtype=dt, device=self.global_config.device))\n self.value_w = nn.Parameter(glorot_uniform(m_data.size(-1), val_dim, dtype=dt, device=self.global_config.device))\n\n init = torch.zeros if self.global_config.zero_init else glorot_uniform\n self.output_w = nn.Parameter(init(N_head, val_dim, self.output_dim, dtype=dt, device=self.global_config.device))\n self.output_b = nn.Parameter(torch.zeros(self.output_dim, dtype=dt, device=self.global_config.device))\n\n if self.config.gating:\n self.gating_w = nn.Parameter(torch.zeros(q_data.size(-1), N_head, val_dim, dtype=dt, device=self.global_config.device))\n self.gating_b = nn.Parameter(torch.ones(N_head, val_dim, dtype=dt, device=self.global_config.device))\n\n self.forward = self.go\n return self(q_data, m_data, q_mask, bias)\n\n def go(self, q_data, m_data, q_mask, bias):\n \"\"\"\n Arguments:\n q_data: A tensor of queries with size [batch_size, N_queries,\n q_channels]\n m_data: A tensor of memories from which the keys and values\n projected. Size [batch_size, N_keys, m_channels]\n q_mask: A binary mask for q_data with zeros in the padded sequence\n elements and ones otherwise. Size [batch_size, N_queries, q_channels]\n (or broadcastable to this shape).\n bias: A bias for the attention.\n\n Returns:\n A float32 tensor of size [batch_size, N_queries, output_dim].\n \"\"\"\n \n v = torch.einsum('bka,ac->bkc', m_data, self.value_w)\n\n q_avg = mask_mean(q_mask, q_data, axis=1)\n\n q = torch.einsum('ba,ahc->bhc', q_avg, self.query_w) * self.key_dim**(-0.5)\n k = torch.einsum('bka,ac->bkc', m_data, self.key_w)\n bias = (1e9 * (q_mask[:, None, :, 0] - 1.))\n logits = torch.einsum('bhc,bkc->bhk', q, k) + bias\n weights = torch.functional.F.softmax(logits, dim=-1)\n weighted_avg = torch.einsum('bhk,bkc->bhc', weights, v)\n\n if self.config.gating:\n gate_values = torch.einsum('bqc, chv->bqhv', q_data, self.gating_w)\n gate_values = torch.sigmoid(gate_values + self.gating_b)\n weighted_avg = weighted_avg[:, None] * gate_values\n output = torch.einsum('bqhc,hco->bqo', weighted_avg, self.output_w) + self.output_b\n else:\n output = torch.einsum('bhc,hco->bo', weighted_avg, self.output_w) + self.output_b\n output = output[:, None]\n return output\n\n\nclass MSARowAttentionWithPairBias(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n MSA per-row attention biased by the pair representation.\n\n Jumper et al. (2021) Suppl. Alg. 
7 \"MSARowAttentionWithPairBias\"\n \"\"\"\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n\n def init_parameters(self, msa_act, msa_mask, pair_act, is_training=False):\n dt = msa_act.dtype\n\n self.query_norm = nn.LayerNorm(msa_act.size(-1), elementwise_affine=True, device=self.global_config.device).to(dt)\n self.feat_2d_norm = nn.LayerNorm(pair_act.size(-1), elementwise_affine=True, device=self.global_config.device).to(dt)\n\n self.feat_2d_weights = nn.Parameter(torch.randn(pair_act.shape[-1], self.config.num_head, \n dtype=dt, device=self.global_config.device) / (pair_act.size(-1)**0.5))\n\n self.attention = Attention(self.config, self.global_config, msa_act.size(-1))\n\n self.forward = self.go\n return self(msa_act, msa_mask, pair_act, is_training)\n\n def go(self, msa_act, msa_mask, pair_act, is_training=False):\n \"\"\"\n Arguments:\n msa_act: [N_seq, N_res, c_m] MSA representation.\n msa_mask: [N_seq, N_res] mask of non-padded regions.\n pair_act: [N_res, N_res, c_z] pair representation.\n is_training: Whether the module is in training mode.\n\n Returns:\n Update to msa_act, shape [N_seq, N_res, c_m].\n \"\"\"\n assert len(msa_act.shape) == 3\n assert len(msa_mask.shape) == 2\n assert self.config.orientation == 'per_row'\n\n bias = (1e9 * (msa_mask - 1.))[:, None, None, :]\n assert len(bias.shape) == 4\n\n msa_act = self.query_norm(msa_act)\n pair_act = self.feat_2d_norm(pair_act)\n nonbatched_bias = torch.einsum('qkc,ch->hqk', pair_act, self.feat_2d_weights)\n \n msa_act = self.attention(msa_act, msa_act, bias, nonbatched_bias)\n return msa_act\n\n\nclass TriangleAttention(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Triangle Attention.\n\n Jumper et al. (2021) Suppl. Alg. 13 \"TriangleAttentionStartingNode\"\n Jumper et al. (2021) Suppl. Alg. 
14 \"TriangleAttentionEndingNode\"\n \"\"\"\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n \n def init_parameters(self, pair_act, pair_mask, is_training=False):\n dt = pair_act.dtype\n self.query_norm = nn.LayerNorm(pair_act.size(-1), elementwise_affine=True, device=self.global_config.device).to(dt)\n self.feat_2d_weights = nn.Parameter(torch.randn(pair_act.shape[-1], self.config.num_head, \n dtype=dt, device=self.global_config.device) / (pair_act.size(-1)**0.5))\n self.attention = Attention(self.config, self.global_config, pair_act.shape[-1])\n\n self.forward = self.go\n return self(pair_act, pair_mask, is_training)\n\n def go(self, pair_act, pair_mask, is_training=False):\n \"\"\"\n Arguments:\n pair_act: [N_res, N_res, c_z] pair activations tensor\n pair_mask: [N_res, N_res] mask of non-padded regions in the tensor.\n is_training: Whether the module is in training mode.\n\n Returns:\n Update to pair_act, shape [N_res, N_res, c_z].\n \"\"\"\n assert len(pair_act.shape) == 3\n assert len(pair_mask.shape) == 2\n assert self.config.orientation in ['per_row', 'per_column']\n\n if self.config.orientation == 'per_column':\n pair_act = torch.swapaxes(pair_act, -2, -3)\n pair_mask = torch.swapaxes(pair_mask, -1, -2)\n\n bias = (1e9 * (pair_mask - 1.))[:, None, None, :]\n assert len(bias.shape) == 4\n\n pair_act = self.query_norm(pair_act)\n\n nonbatched_bias = torch.einsum('qkc,ch->hqk', pair_act, self.feat_2d_weights)\n pair_act = self.attention(pair_act, pair_act, bias, nonbatched_bias)\n\n if self.config.orientation == 'per_column':\n pair_act = torch.swapaxes(pair_act, -2, -3)\n return pair_act\n\n\nclass TriangleMultiplication(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Triangle multiplication layer (\"outgoing\" or \"incoming\").\n\n Jumper et al. (2021) Suppl. Alg. 11 \"TriangleMultiplicationOutgoing\"\n Jumper et al. (2021) Suppl. Alg. 
12 \"TriangleMultiplicationIncoming\"\n \"\"\"\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n\n def init_parameters(self, act, mask, is_training=True):\n dt = act.dtype\n \n channel = act.size(-1)\n intermed_c = self.config.num_intermediate_channel\n \n self.layer_norm_input = nn.LayerNorm(channel, elementwise_affine=True, device=self.global_config.device).to(dt)\n\n self.left_projection = makeLinear(channel, intermed_c, dt, self.global_config.device)\n self.right_projection = makeLinear(channel, intermed_c, dt, self.global_config.device)\n\n init = 'zeros' if self.global_config.zero_init else 'linear'\n \n self.left_gate = makeLinear(channel, intermed_c, dt, self.global_config.device, initializer=init, bias_init=1.)\n self.right_gate = makeLinear(channel, intermed_c, dt, self.global_config.device, initializer=init, bias_init=1.)\n \n self.center_layer_norm = nn.LayerNorm(intermed_c, elementwise_affine=True, device=self.global_config.device).to(dt)\n\n self.output_projection = makeLinear(intermed_c, channel, dt, self.global_config.device, initializer=init)\n self.gating_linear = makeLinear(channel, channel, dt, self.global_config.device, initializer=init, bias_init=1.)\n\n self.forward = self.go\n return self(act, mask, is_training)\n\n def go(self, act, mask, is_training=True):\n \"\"\"\n Arguments:\n act: Pair activations, shape [N_res, N_res, c_z]\n mask: Pair mask, shape [N_res, N_res].\n is_training: Whether the module is in training mode.\n\n Returns:\n Outputs, same shape/type as act.\n \"\"\"\n mask = mask[..., None]\n\n act = self.layer_norm_input(act)\n input_act = act.clone()# not sure about this in the origiinal code\n\n left_proj_act = mask * self.left_projection(act)\n right_proj_act = mask * self.right_projection(act)\n\n left_gate_values = torch.sigmoid(self.left_gate(act))\n right_gate_values = torch.sigmoid(self.right_gate(act))\n\n left_proj_act *= left_gate_values\n right_proj_act *= right_gate_values\n\n act = torch.einsum(self.config.equation, left_proj_act, right_proj_act)\n act = self.center_layer_norm(act)\n act = self.output_projection(act)\n\n gate_values = torch.sigmoid(self.gating_linear(input_act))\n act *= gate_values\n return act\n\n\nclass Transition(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Transition layer.\n\n Jumper et al. (2021) Suppl. Alg. 9 \"MSATransition\"\n Jumper et al. (2021) Suppl. Alg. 
15 \"PairTransition\"\n \"\"\"\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n\n def init_parameters(self, act, mask, is_training=True):\n dt = act.dtype\n _, _, nc = act.shape\n\n num_intermediate = int(nc * self.config.num_intermediate_factor)\n mask = mask[...,None]\n\n self.input_layer_norm = nn.LayerNorm(nc, elementwise_affine=True, device=self.global_config.device).to(dt)\n\n init = 'zeros' if self.global_config.zero_init else 'linear'\n self.transition1 = makeLinear(nc, num_intermediate, dt, self.global_config.device, 'relu')\n self.transition2 = makeLinear(num_intermediate, nc, dt, self.global_config.device, init)\n self.transition = nn.Sequential(self.transition1, nn.ReLU(inplace=False), self.transition2)\n\n self.forward = self.go\n return self(act, mask, is_training)\n\n def go(self, act, mask, is_training=True):\n \"\"\"\n Arguments:\n act: A tensor of queries of size [batch_size, N_res, N_channel].\n mask: A tensor denoting the mask of size [batch_size, N_res].\n is_training: Whether the module is in training mode.\n\n Returns:\n A float32 tensor of size [batch_size, N_res, N_channel].\n \"\"\"\n mask = mask[...,None]\n act = self.input_layer_norm(act)\n act = self.transition(act)\n return act\n\n\nclass OuterProductMean(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Computes mean outer product.\n\n Jumper et al. (2021) Suppl. Alg. 10 \"OuterProductMean\"\n \"\"\"\n def __init__(self, config, global_config, num_output_channel):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.num_output_channel = num_output_channel\n self.forward = self.init_parameters\n \n def init_parameters(self, act, mask, is_training=True):\n dt = act.dtype\n c = self.config\n channel = act.size(-1)\n\n self.layer_norm_input = nn.LayerNorm(channel, elementwise_affine=True, device=self.global_config.device).to(dt)\n\n self.left_projection = makeLinear(channel, c.num_outer_channel, dt, self.global_config.device)\n self.right_projection = makeLinear(channel, c.num_outer_channel, dt, self.global_config.device)\n\n init = torch.zeros if self.global_config.zero_init else he_normal\n self.output_w = nn.Parameter(init(c.num_outer_channel, \n c.num_outer_channel, self.num_output_channel, dtype=dt, device=self.global_config.device))\n self.output_b = nn.Parameter(torch.zeros(self.num_output_channel, dtype=dt, device=self.global_config.device))\n\n self.forward = self.go\n return self(act, mask, is_training)\n\n def go(self, act, mask, is_training=True):\n \"\"\"\n Arguments:\n act: MSA representation, shape [N_seq, N_res, c_m].\n mask: MSA mask, shape [N_seq, N_res].\n is_training: Whether the module is in training mode.\n\n Returns:\n Update to pair representation, shape [N_res, N_res, c_z].\n \"\"\"\n mask = mask[..., None]\n act = self.layer_norm_input(act)\n \n left_act = mask * self.left_projection(act)\n right_act = mask * self.right_projection(act)\n\n act = torch.einsum('abc,ade,cef->bdf', left_act, right_act, self.output_w) + self.output_b\n\n epsilon = 1e-3\n norm = torch.einsum('abc,adc->bdc', mask, mask)\n act /= epsilon + norm\n return act\n\n\nclass MSAColumnAttention(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n MSA per-column attention.\n\n Jumper et al. 
(2021) Suppl. Alg. 8 \"MSAColumnAttention\"\n \"\"\"\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n\n def init_parameters(self, msa_act, msa_mask, is_training=False):\n dt = msa_act.dtype\n\n self.query_norm = nn.LayerNorm(msa_act.size(-1), elementwise_affine=True, device=self.global_config.device).to(dt)\n\n self.attention = Attention(self.config, self.global_config, msa_act.size(-1))\n\n\n self.forward = self.go\n return self(msa_act, msa_mask, is_training)\n\n def go(self, msa_act, msa_mask, is_training=False):\n \"\"\"\n Arguments:\n msa_act: [N_seq, N_res, c_m] MSA representation.\n msa_mask: [N_seq, N_res] mask of non-padded regions.\n is_training: Whether the module is in training mode.\n\n Returns:\n Update to msa_act, shape [N_seq, N_res, c_m]\n \"\"\"\n assert len(msa_act.shape) == 3\n assert len(msa_mask.shape) == 2\n assert self.config.orientation == 'per_column'\n\n msa_act = torch.swapaxes(msa_act, -2, -3)\n msa_mask = torch.swapaxes(msa_mask, -1, -2)\n\n bias = (1e9 * (msa_mask - 1.))[:, None, None, :]\n assert len(bias.shape) == 4\n\n msa_act = self.query_norm(msa_act)\n msa_act = self.attention(msa_act, msa_act, bias)\n msa_act = torch.swapaxes(msa_act, -2, -3)\n return msa_act\n\n\nclass MSAColumnGlobalAttention(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n MSA per-column global attention.\n\n Jumper et al. (2021) Suppl. Alg. 19 \"MSAColumnGlobalAttention\"\n \"\"\"\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n\n def init_parameters(self, msa_act, msa_mask, is_training=False):\n dt = msa_act.dtype\n\n self.query_norm = nn.LayerNorm(msa_act.size(-1), elementwise_affine=True, device=self.global_config.device).to(dt)\n self.attention = GlobalAttention(self.config, self.global_config, msa_act.size(-1))\n\n\n self.forward = self.go\n return self(msa_act, msa_mask, is_training)\n\n def go(self, msa_act, msa_mask, is_training=False):\n \"\"\"\n Arguments:\n msa_act: [N_seq, N_res, c_m] MSA representation.\n msa_mask: [N_seq, N_res] mask of non-padded regions.\n is_training: Whether the module is in training mode.\n\n Returns:\n Update to msa_act, shape [N_seq, N_res, c_m].\n \"\"\"\n assert len(msa_act.shape) == 3\n assert len(msa_mask.shape) == 2\n assert self.config.orientation == 'per_column'\n\n msa_act = torch.swapaxes(msa_act, -2, -3)\n msa_mask = torch.swapaxes(msa_mask, -1, -2)\n\n bias = (1e9 * (msa_mask - 1.))[:, None, None, :]\n assert len(bias.shape) == 4\n\n msa_act = self.query_norm(msa_act)\n\n \n # [N_seq, N_res, 1]\n msa_mask = msa_mask[...,None]\n msa_act = self.attention(msa_act, msa_act, msa_mask, bias)\n msa_act = torch.swapaxes(msa_act, -2, -3)\n return msa_act\n\n\nclass ResidualDropOut(nn.Module):\n def __init__(self, config, global_config):\n super().__init__()\n self.dim = 0 if config.orientation=='per_row' else 1\n self.dropout_rate = 0.0 if global_config.deterministic else config.dropout_rate\n self.bern = lambda sh: self.dropout_rate < torch.rand(*sh, device=global_config.device)\n self.keep_rate = 1 - self.dropout_rate\n\n def forward(self, x, residual, training):\n if training:\n shape = list(residual.size())\n shape[self.dim] = 1\n residual = residual * self.bern(shape) / self.keep_rate\n return x + residual\n\n\ndef 
create_extra_msa_feature(batch, n_cat):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Expand extra_msa into 1hot and concat with other extra msa features.\n\n We do this as late as possible as the one_hot extra msa can be very large.\n\n Arguments:\n batch: a dictionary with the following keys:\n * 'extra_msa': [N_extra_seq, N_res] MSA that wasn't selected as a cluster\n centre. Note, that this is not one-hot encoded.\n * 'extra_has_deletion': [N_extra_seq, N_res] Whether there is a deletion to\n the left of each position in the extra MSA.\n * 'extra_deletion_value': [N_extra_seq, N_res] The number of deletions to\n the left of each position in the extra MSA.\n\n Returns:\n Concatenated tensor of extra MSA features.\n \"\"\"\n # 7 = 4 bases + 'X' for unknown + gap + bert mask\n # 23 = 20 amino acids + 'X' for unknown + gap + bert mask\n dt = batch['extra_msa'].dtype\n msa_1hot = torch.functional.F.one_hot(batch['extra_msa'].long(), n_cat).to(dt)\n hd = batch['extra_has_deletion']\n dv = batch['extra_deletion_value']\n return torch.cat([msa_1hot,hd[...,None],dv[...,None]], axis=-1)\n\n\n# def create_extra_rna_msa_feature(batch):\n# # 7 = 4 bases + 'X' for unknown + gap + bert mask\n# dt = batch['extra_msa'].dtype\n# msa_1hot = torch.functional.F.one_hot(batch['extra_msa'].long(), 7).to(dt)\n# hd = batch['extra_has_deletion']\n# dv = batch['extra_deletion_value']\n# return torch.cat([msa_1hot,hd[...,None],dv[...,None]], axis=-1)\n\n\nclass EvoformerIteration(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Single iteration (block) of Evoformer stack.\n\n Jumper et al. (2021) Suppl. Alg. 
6 \"EvoformerStack\" lines 2-10\n \"\"\"\n def __init__(self, config, global_config, is_extra_msa):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.is_extra_msa = is_extra_msa\n self.forward = self.init_parameters\n\n def init_parameters(self, x):\n activations, masks, is_training = x\n c = self.config\n gc = self.global_config\n\n pair_act = activations['pair']\n self.msa_row_attention_with_pair_bias = MSARowAttentionWithPairBias(\n c.msa_row_attention_with_pair_bias, gc)\n self.dropout_msa_r_a_w_p_b = ResidualDropOut(c.msa_transition, gc)\n\n if not self.is_extra_msa:\n self.msa_column_attention = MSAColumnAttention(\n c.msa_column_attention, gc)\n self.attn_mod = self.msa_column_attention\n else:\n self.msa_column_global_attention = MSAColumnGlobalAttention(\n c.msa_column_attention, gc)\n self.attn_mod = self.msa_column_global_attention\n\n self.dropout_msa_attn = ResidualDropOut(c.msa_column_attention, gc)\n\n\n self.msa_transition = Transition(c.msa_transition, gc)\n self.dropout_msa_transition = ResidualDropOut(c.msa_transition, gc)# not sure about the config\n\n self.outer_product_mean = OuterProductMean(c.outer_product_mean, gc, \n num_output_channel=int(pair_act.size(-1)))\n self.dropout_out_prd_mn = ResidualDropOut(c.outer_product_mean, gc)\n\n self.triangle_multiplication_outgoing = TriangleMultiplication(\n c.triangle_multiplication_outgoing, gc)\n self.dropout_tri_mult_out = ResidualDropOut(\n c.triangle_multiplication_outgoing, gc)\n \n con_tmi = c.triangle_multiplication_incoming\n self.triangle_multiplication_incoming = TriangleMultiplication(con_tmi, gc)\n self.dropout_tri_mult_inc = ResidualDropOut(con_tmi, gc)\n\n con_tas = c.triangle_attention_starting_node\n self.triangle_attention_starting_node = TriangleAttention(con_tas, gc)\n self.dropout_tri_atn_srt = ResidualDropOut(con_tas, gc)\n\n con_tae = c.triangle_attention_ending_node\n self.triangle_attention_ending_node = TriangleAttention(con_tae, gc)\n self.dropout_tri_atn_end = ResidualDropOut(con_tae, gc)\n\n con_prt = c.pair_transition\n self.pair_transition = Transition(con_prt, gc)\n self.dropout_pair_trn = ResidualDropOut(con_prt, gc)\n \n self.forward = self.go\n return self(x)\n\n def go(self, x):\n \"\"\"\n Arguments:\n activations: Dictionary containing activations:\n * 'msa': MSA activations, shape [N_seq, N_res, c_m].\n * 'pair': pair activations, shape [N_res, N_res, c_z].\n masks: Dictionary of masks:\n * 'msa': MSA mask, shape [N_seq, N_res].\n * 'pair': pair mask, shape [N_res, N_res].\n is_training: Whether the module is in training mode.\n safe_key: prng.SafeKey encapsulating rng key.\n\n Returns:\n Outputs, same shape/type as act.\n \"\"\"\n activations, masks, is_training = x\n\n msa_act, pair_act = activations['msa'], activations['pair']\n msa_mask, pair_mask = masks['msa'], masks['pair']\n \n msa_act = self.dropout_msa_r_a_w_p_b(msa_act, \n self.msa_row_attention_with_pair_bias(msa_act, msa_mask, pair_act), training=is_training)\n\n msa_act = self.dropout_msa_attn(msa_act, self.attn_mod(msa_act, msa_mask), training=is_training)\n\n msa_act = self.dropout_msa_transition(msa_act, \n self.msa_transition(msa_act, msa_mask), training=is_training)\n\n pair_act = self.dropout_out_prd_mn(pair_act, \n self.outer_product_mean(msa_act, msa_mask), training=is_training)\n\n pair_act = self.dropout_tri_mult_out(pair_act, \n self.triangle_multiplication_outgoing(pair_act, pair_mask), training=is_training)\n \n pair_act = self.dropout_tri_mult_inc(pair_act, \n 
self.triangle_multiplication_incoming(pair_act, pair_mask), training=is_training)\n\n        pair_act = self.dropout_tri_atn_srt(pair_act, \n            self.triangle_attention_starting_node(pair_act, pair_mask), training=is_training)\n\n        pair_act = self.dropout_tri_atn_end(pair_act, \n            self.triangle_attention_ending_node(pair_act, pair_mask), training=is_training)\n\n        pair_act = self.dropout_pair_trn(pair_act, \n            self.pair_transition(pair_act, pair_mask), training=is_training)\n\n        return [{'msa': msa_act, 'pair': pair_act}, masks, is_training]\n\n\ndef dgram_from_positions(positions, num_bins, min_bin, max_bin):\n    \"\"\"\n    DeepMind AlphaFold code https://github.com/deepmind/alphafold\n    ported to pytorch by Louis Robinson (21 Aug 2021).\n    \n    Compute distogram from amino acid positions.\n\n    Arguments:\n      positions: [N_res, 3] Position coordinates.\n      num_bins: The number of bins in the distogram.\n      min_bin: The left edge of the first bin.\n      max_bin: The left edge of the final bin. The final bin catches\n        everything larger than `max_bin`.\n\n    Returns:\n      Distogram with the specified number of bins.\n    \"\"\"\n    lower_breaks = torch.linspace(min_bin, max_bin, num_bins, device=positions.device)**2\n    upper_breaks = torch.cat([lower_breaks[1:],\n                              torch.tensor([1e8], dtype=torch.float32, device=positions.device)], dim=-1)\n    # squared pairwise distances (the difference must be squared before summing)\n    dist2 = ((positions[:,None,:] - positions[None,:,:])**2).sum(-1)[...,None]\n\n    dgram = ((dist2 > lower_breaks).float() * (dist2 < upper_breaks).float())\n    return dgram\n\n\nclass InvariantPointAttention(nn.Module):\n    \"\"\"\n    DeepMind AlphaFold code https://github.com/deepmind/alphafold\n    ported to pytorch by Louis Robinson (21 Aug 2021).\n    \n    Invariant Point attention module.\n\n    This attention module works over a set of points and associated\n    orientations in 3D space (e.g. protein residues); each residue is sent to\n    its own local reference frame, which makes the module invariant to global\n    rotations and translations.\n\n    Jumper et al. (2021) Suppl. Alg. 22 \"InvariantPointAttention\"\n    \"\"\"\n    def __init__(self, config, global_config, dist_epsilon=1e-8):\n        super().__init__()\n        self.config, self.global_config = config, global_config\n        self._dist_epsilon = dist_epsilon\n        self._zero_initialize_last = global_config.zero_init\n        self.forward = self.init_parameters\n\n    def init_parameters(self, inputs_1d, inputs_2d, mask, affine):\n        dt = inputs_1d.dtype\n        c = self.config\n\n        assert c.num_scalar_qk > 0\n        assert c.num_point_qk > 0\n        assert c.num_point_v > 0\n\n        self.q_scalar = makeLinear(inputs_1d.size(-1), c.num_head * c.num_scalar_qk, dt, self.global_config.device)\n        self.kv_scalar = makeLinear(inputs_1d.size(-1), c.num_head * (c.num_scalar_v + c.num_scalar_qk), dt, self.global_config.device)\n        self.q_point_local = makeLinear(inputs_1d.size(-1), c.num_head * 3 * c.num_point_qk, dt, self.global_config.device)\n        self.kv_point_local = makeLinear(inputs_1d.size(-1), c.num_head * 3 * (c.num_point_qk + c.num_point_v), dt, self.global_config.device)\n\n        self.trainable_point_weights = nn.Parameter(torch.ones(c.num_head, dtype=dt, device=self.global_config.device)*np.log(np.exp(1.) - 1.))
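\n        # softplus(log(e - 1)) = log(1 + (e - 1)) = 1, so each head's trainable\n        # point weight starts at exactly 1 after the softplus applied in go() below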
\n        self.softplus = nn.Softplus()\n\n        self.attention_2d = makeLinear(inputs_2d.size(-1), c.num_head, dt, self.global_config.device)\n        final_init = 'zeros' if self._zero_initialize_last else 'linear'\n\n        out_dim = c.num_scalar_v + c.num_point_v * 4 + inputs_2d.size(-1)\n        self.output_projection = makeLinear(c.num_head * out_dim, c.num_channel, dt, self.global_config.device, initializer=final_init)\n\n        self.forward = self.go\n        return self(inputs_1d, inputs_2d, mask, affine)\n\n    def go(self, inputs_1d, inputs_2d, mask, affine):\n        \"\"\"Compute geometry-aware attention.\n\n        Given a set of query residues (defined by affines and associated scalar\n        features), this function computes geometry-aware attention between the\n        query residues and target residues.\n\n        The residues produce points in their local reference frame, which\n        are converted into the global frame in order to compute attention via\n        euclidean distance.\n\n        Equivalently, the target residues produce points in their local frame to be\n        used as attention values, which are converted into the query residues'\n        local frames.\n\n        Args:\n          inputs_1d: (N, C) 1D input embedding that is the basis for the\n            scalar queries.\n          inputs_2d: (N, M, C') 2D input embedding, used for biases and values.\n          mask: (N, 1) mask to indicate which elements of inputs_1d participate\n            in the attention.\n          affine: QuatAffine object describing the position and orientation of\n            every element in inputs_1d.\n\n        Returns:\n          Transformation of the input embedding.\n        \"\"\"\n        c = self.config\n        num_residues = inputs_1d.size(0)\n\n        # Construct scalar queries of shape:\n        # [num_query_residues, num_head, num_points]\n        q_scalar = self.q_scalar(inputs_1d)\n        q_scalar = q_scalar.reshape(num_residues, c.num_head, c.num_scalar_qk)\n\n        # Construct scalar keys/values of shape:\n        # [num_target_residues, num_head, num_points]\n        kv_scalar = self.kv_scalar(inputs_1d)\n        kv_scalar = kv_scalar.reshape(num_residues, c.num_head, c.num_scalar_v + c.num_scalar_qk)\n        # split by explicit section sizes so this also works when num_scalar_v != num_scalar_qk\n        k_scalar, v_scalar = torch.split(kv_scalar, [c.num_scalar_qk, c.num_scalar_v], dim=-1)\n\n        # Construct query points of shape:\n        # [num_residues, num_head, num_point_qk]\n\n        # First construct query points in local frame.\n        q_point_local = self.q_point_local(inputs_1d)\n        q_point_local = torch.split(q_point_local, q_point_local.size(-1)//3, dim=-1)\n\n        # Project query points into global frame.\n        q_point_global = affine.apply_to_point(q_point_local, extra_dims=1)\n        # Reshape query point for later use.\n        q_point = [x.reshape(num_residues, c.num_head, c.num_point_qk) for x in q_point_global]\n\n        # Construct key and value points.\n        # Key points have shape [num_residues, num_head, num_point_qk]\n        # Value points have shape [num_residues, num_head, num_point_v]\n\n        # Construct key and value points in local frame.\n        kv_point_local = self.kv_point_local(inputs_1d)\n        kv_point_local = torch.split(kv_point_local, kv_point_local.size(-1)//3, dim=-1)\n\n        # Project key and value points into global frame.\n        kv_point_global = affine.apply_to_point(kv_point_local, extra_dims=1)\n        kv_point_global = [x.reshape(num_residues, c.num_head, (c.num_point_qk + c.num_point_v))\n                           for x in kv_point_global]\n        # Split key and value points.\n        k_point, v_point = list(\n            zip(*[torch.split(x, [c.num_point_qk, x.size(-1)-c.num_point_qk], dim=-1) for x in kv_point_global]))
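\n\n        # Sanity check for the variance constants used just below (iid q, k ~ N(0, I_3)):\n        # Var(<q, k>) = 3 and Var(0.5 * ||q||^2) = 3/2; the two terms are\n        # uncorrelated, so Var[0.5 ||q||^2 - <q, k>] = 3/2 + 3 = 9/2.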
\n        # We assume that all queries and keys come iid from N(0, 1) distribution\n        # and compute the variances of the attention logits.\n        # Each scalar pair (q, k) contributes Var q*k = 1\n        scalar_variance = max(c.num_scalar_qk, 1) * 1.\n        # Each point pair (q, k) contributes Var [0.5 ||q||^2 - <q, k>] = 9 / 2\n        point_variance = max(c.num_point_qk, 1) * 9. / 2\n\n        # Allocate equal variance to scalar, point and attention 2d parts so that\n        # the sum is 1.\n\n        num_logit_terms = 3\n\n        scalar_weights = (1.0 / (num_logit_terms * scalar_variance))**0.5\n        point_weights = (1.0 / (num_logit_terms * point_variance))**0.5\n        attention_2d_weights = (1.0 / (num_logit_terms))**0.5\n\n        # Trainable per-head weights for points.\n        trainable_point_weights = self.softplus(self.trainable_point_weights)\n        point_weights *= trainable_point_weights[:,None,...]\n\n        v_point = [torch.swapaxes(x, -2, -3) for x in v_point]\n        q_point = [torch.swapaxes(x, -2, -3) for x in q_point]\n        k_point = [torch.swapaxes(x, -2, -3) for x in k_point]\n        dist2 = [(qx[:, :, None, :] - kx[:, None, :, :])**2\n                 for qx, kx in zip(q_point, k_point)]\n        dist2 = sum(dist2)\n        attn_qk_point = -0.5 * (point_weights[:, None, None, :] * dist2).sum(dim=-1)\n\n        v = torch.swapaxes(v_scalar, -2, -3)\n        q = torch.swapaxes(scalar_weights * q_scalar, -2, -3)\n        k = torch.swapaxes(k_scalar, -2, -3)\n        attn_qk_scalar = torch.matmul(q, torch.swapaxes(k, -2, -1))\n        attn_logits = attn_qk_scalar + attn_qk_point\n\n        attention_2d = self.attention_2d(inputs_2d)\n\n        attention_2d = attention_2d.permute(2, 0, 1)\n        attention_2d = attention_2d_weights * attention_2d\n        attn_logits += attention_2d\n\n        mask_2d = mask * torch.swapaxes(mask, -1, -2)\n        attn_logits -= 1e5 * (1. - mask_2d)\n\n        # [num_head, num_query_residues, num_target_residues]\n        self.softmax = nn.Softmax(dim=-1)\n        attn = self.softmax(attn_logits)\n\n        # [num_head, num_query_residues, num_head * num_scalar_v]\n        result_scalar = torch.matmul(attn, v)\n\n        # For point result, implement matmul manually so that it will be a float32\n        # on TPU. This is equivalent to\n        # result_point_global = [jnp.einsum('bhqk,bhkc->bhqc', attn, vx)\n        #                        for vx in v_point]\n        # but on the TPU, doing the multiply and reduce_sum ensures the\n        # computation happens in float32 instead of bfloat16.\n        result_point_global = [(attn[:, :, :, None] * vx[:, None, :, :]).sum(dim=-2) for vx in v_point]\n\n        # [num_query_residues, num_head, num_head * num_(scalar|point)_v]\n        result_scalar = torch.swapaxes(result_scalar, -2, -3)\n        result_point_global = [\n            torch.swapaxes(x, -2, -3) for x in result_point_global]\n\n        # Features used in the linear output projection. 
Should have the size\n # [num_query_residues, ?]\n output_features = []\n\n result_scalar = result_scalar.reshape(num_residues, c.num_head * c.num_scalar_v)\n output_features.append(result_scalar)\n\n result_point_global = [\n r.reshape(num_residues, c.num_head * c.num_point_v) for r in result_point_global]\n result_point_local = affine.invert_point(result_point_global, extra_dims=1)\n output_features.extend(result_point_local)\n\n output_features.append((self._dist_epsilon + (result_point_local[0]**2) +\n (result_point_local[1]**2) + (result_point_local[2]**2))**0.5)\n\n # Dimensions: h = heads, i and j = residues,\n # c = inputs_2d channels\n # Contraction happens over the second residue dimension, similarly to how\n # the usual attention is performed.\n result_attention_over_2d = torch.einsum('hij, ijc->ihc', attn, inputs_2d)\n num_out = c.num_head * result_attention_over_2d.size(-1)\n output_features.append(result_attention_over_2d.reshape(num_residues, num_out))\n\n final_act = torch.cat(output_features, dim=-1)\n return self.output_projection(final_act)\n\n\n\n# pylint: disable=bad-whitespace\nclass Quat:\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \"\"\"\n def __init__(self, device):\n zeros = lambda s: torch.zeros(*s, device=device)\n toTensor = lambda x: torch.tensor(x, device=device)\n\n rr = [[ 1, 0, 0], [ 0, 1, 0], [ 0, 0, 1]]\n ii = [[ 1, 0, 0], [ 0,-1, 0], [ 0, 0,-1]]\n jj = [[-1, 0, 0], [ 0, 1, 0], [ 0, 0,-1]]\n kk = [[-1, 0, 0], [ 0,-1, 0], [ 0, 0, 1]]\n\n ij = [[ 0, 2, 0], [ 2, 0, 0], [ 0, 0, 0]]\n ik = [[ 0, 0, 2], [ 0, 0, 0], [ 2, 0, 0]]\n jk = [[ 0, 0, 0], [ 0, 0, 2], [ 0, 2, 0]]\n\n ir = [[ 0, 0, 0], [ 0, 0,-2], [ 0, 2, 0]]\n jr = [[ 0, 0, 2], [ 0, 0, 0], [-2, 0, 0]]\n kr = [[ 0,-2, 0], [ 2, 0, 0], [ 0, 0, 0]]\n\n [rr,ii,jj,kk,ij,ik,jk,ir,jr,kr] = list(map(toTensor, [rr,ii,jj,kk,ij,ik,jk,ir,jr,kr]))\n QUAT_TO_ROT = zeros((4, 4, 3, 3))\n QUAT_TO_ROT[0, 0] = rr\n QUAT_TO_ROT[1, 1] = ii\n QUAT_TO_ROT[2, 2] = jj\n QUAT_TO_ROT[3, 3] = kk\n QUAT_TO_ROT[1, 2] = ij\n QUAT_TO_ROT[1, 3] = ik\n QUAT_TO_ROT[2, 3] = jk\n for i, t in zip([1,2,3], [ir,jr,kr]):\n QUAT_TO_ROT[0, i] = t\n\n qml = ([[ 1, 0, 0, 0],\n [ 0,-1, 0, 0],\n [ 0, 0,-1, 0],\n [ 0, 0, 0,-1]],\n\n [[ 0, 1, 0, 0],\n [ 1, 0, 0, 0],\n [ 0, 0, 0, 1],\n [ 0, 0,-1, 0]],\n\n [[ 0, 0, 1, 0],\n [ 0, 0, 0,-1],\n [ 1, 0, 0, 0],\n [ 0, 1, 0, 0]],\n\n [[ 0, 0, 0, 1],\n [ 0, 0, 1, 0],\n [ 0,-1, 0, 0],\n [ 1, 0, 0, 0]])\n\n QMs = list(map(toTensor, qml))\n QUAT_MULTIPLY = zeros((4, 4, 4))\n for i in range(4):\n QUAT_MULTIPLY[:,:,i] = QMs[i]\n self.QUAT_TO_ROT = QUAT_TO_ROT\n self.QUAT_MULTIPLY = QUAT_MULTIPLY\n self.QUAT_MULTIPLY_BY_VEC = QUAT_MULTIPLY[:, 1:, :]\n# pylint: enable=bad-whitespace\n\n\ndef rot_to_quat(rot, unstack_inputs=False):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Convert rotation matrix to quaternion.\n\n Note that this function calls self_adjoint_eig which is extremely expensive on\n the GPU. 
If at all possible, this function should run on the CPU.\n\n Args:\n rot: rotation matrix (see below for format).\n unstack_inputs: If true, rotation matrix should be shape (..., 3, 3)\n otherwise the rotation matrix should be a list of lists of tensors.\n\n Returns:\n Quaternion as (..., 4) tensor.\n \"\"\"\n if unstack_inputs:\n rot = [torch.moveaxis(x, -1, 0) for x in torch.moveaxis(rot, -2, 0)]\n\n [[xx, xy, xz], [yx, yy, yz], [zx, zy, zz]] = rot\n\n # pylint: disable=bad-whitespace\n k = [[ xx + yy + zz, zy - yz, xz - zx, yx - xy,],\n [ zy - yz, xx - yy - zz, xy + yx, xz + zx,],\n [ xz - zx, xy + yx, yy - xx - zz, yz + zy,],\n [ yx - xy, xz + zx, yz + zy, zz - xx - yy,]]\n # pylint: enable=bad-whitespace\n\n k = (1./3.) * torch.stack([torch.stack(x, dim=-1) for x in k],\n dim=-2)\n\n # Get eigenvalues in non-decreasing order and associated.\n # d = k.device\n # _, qs = torch.linalg.eigh(k.to('cpu')).to(d)\n _, qs = torch.linalg.eigh(k)\n return qs[..., -1]\n\n\ndef rot_list_to_tensor(rot_list):\n \"\"\"Convert list of lists to rotation tensor.\"\"\"\n return torch.stack(\n [torch.stack(rot_list[0], dim=-1),\n torch.stack(rot_list[1], dim=-1),\n torch.stack(rot_list[2], dim=-1)],\n dim=-2)\n\n\ndef vec_list_to_tensor(vec_list):\n \"\"\"Convert list to vector tensor.\"\"\"\n return torch.stack(vec_list, dim=-1)\n\n\ndef quat_to_rot(QUAT_TO_ROT, normalized_quat):\n \"\"\"Convert a normalized quaternion to a rotation matrix.\"\"\"\n rot_tensor = torch.sum(\n torch.reshape(QUAT_TO_ROT, (4, 4, 9)) *\n normalized_quat[..., :, None, None] *\n normalized_quat[..., None, :, None],\n dim=(-3, -2))\n rot = torch.moveaxis(rot_tensor, -1, 0) # Unstack.\n return [[rot[0], rot[1], rot[2]],\n [rot[3], rot[4], rot[5]],\n [rot[6], rot[7], rot[8]]]\n\n\ndef quat_multiply_by_vec(QUAT_MULTIPLY_BY_VEC, quat, vec):\n \"\"\"Multiply a quaternion by a pure-vector quaternion.\"\"\"\n return torch.sum(\n QUAT_MULTIPLY_BY_VEC *\n quat[..., :, None, None] *\n vec[..., None, :, None],\n dim=(-3, -2))\n\n\ndef quat_multiply(QUAT_MULTIPLY, quat1, quat2):\n \"\"\"Multiply a quaternion by another quaternion.\"\"\"\n return torch.sum(\n QUAT_MULTIPLY *\n quat1[..., :, None, None] *\n quat2[..., None, :, None],\n dim=(-3, -2))\n\ndef apply_rot_to_vec(rot, vec, unstack=False):\n \"\"\"Multiply rotation matrix by a vector.\"\"\"\n if unstack:\n x, y, z = [vec[:, i] for i in range(3)]\n else:\n x, y, z = vec\n return [rot[0][0] * x + rot[0][1] * y + rot[0][2] * z,\n rot[1][0] * x + rot[1][1] * y + rot[1][2] * z,\n rot[2][0] * x + rot[2][1] * y + rot[2][2] * z]\n\ndef apply_inverse_rot_to_vec(rot, vec):\n \"\"\"Multiply the inverse of a rotation matrix by a vector.\"\"\"\n # Inverse rotation is just transpose\n return [rot[0][0] * vec[0] + rot[1][0] * vec[1] + rot[2][0] * vec[2],\n rot[0][1] * vec[0] + rot[1][1] * vec[1] + rot[2][1] * vec[2],\n rot[0][2] * vec[0] + rot[1][2] * vec[1] + rot[2][2] * vec[2]]\n\n\nclass QuatAffine(object):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Affine transformation represented by quaternion and vector.\"\"\"\n\n def __init__(self, quaternion, translation, device, rotation=None, normalize=True,\n unstack_inputs=False):\n \"\"\"Initialize from quaternion and translation.\n\n Args:\n quaternion: Rotation represented by a quaternion, to be applied\n before translation. 
Must be a unit quaternion unless normalize==True.\n translation: Translation represented as a vector.\n rotation: Same rotation as the quaternion, represented as a (..., 3, 3)\n tensor. If None, rotation will be calculated from the quaternion.\n normalize: If True, l2 normalize the quaternion on input.\n unstack_inputs: If True, translation is a vector with last component 3\n \"\"\"\n quat = Quat(device)\n self.QUAT_TO_ROT = quat.QUAT_TO_ROT\n self.QUAT_MULTIPLY = quat.QUAT_MULTIPLY\n self.QUAT_MULTIPLY_BY_VEC = quat.QUAT_MULTIPLY_BY_VEC\n\n self.device = device\n\n\n if quaternion is not None:\n assert quaternion.shape[-1] == 4\n\n if unstack_inputs:\n if rotation is not None:\n rotation = [torch.moveaxis(x, -1, 0) # Unstack.\n for x in torch.moveaxis(rotation, -2, 0)] # Unstack.\n translation = torch.moveaxis(translation, -1, 0) # Unstack.\n\n if normalize and quaternion is not None:\n quaternion = quaternion / torch.linalg.norm(quaternion, dim=-1,\n keepdims=True)\n\n if rotation is None:\n rotation = quat_to_rot(self.QUAT_TO_ROT, quaternion)\n\n self.quaternion = quaternion\n self.rotation = [list(row) for row in rotation]\n self.translation = list(translation)\n\n assert all(len(row) == 3 for row in self.rotation)\n assert len(self.translation) == 3\n\n def to_tensor(self):\n return torch.cat(\n [self.quaternion] + [x[...,None] for x in self.translation], dim=-1)\n\n def clone(self):\n return QuatAffine(self.quaternion.clone(),\n [x.clone() for x in self.translation],\n self.device,\n rotation=[[x.clone() for x in row] for row in self.rotation],\n normalize=False)\n\n def detach(self):\n \"\"\"Return a new QuatAffine with tensor_fn applied (e.g. stop_gradient).\"\"\"\n return QuatAffine(self.quaternion.detach(),\n [x.detach() for x in self.translation],\n self.device,\n rotation=[[x.detach() for x in row] for row in self.rotation],\n normalize=False)\n\n def detach_rot(self):\n return QuatAffine(self.quaternion.detach(),\n [x for x in self.translation],\n self.device,\n rotation=[[x.detach() for x in row] for row in self.rotation],\n normalize=False)\n\n def scale_translation(self, position_scale):\n \"\"\"Return a new quat affine with a different scale for translation.\"\"\"\n\n return QuatAffine(\n self.quaternion,\n [x * position_scale for x in self.translation],\n self.device,\n rotation=[[x for x in row] for row in self.rotation],\n normalize=False)\n\n @classmethod\n def from_tensor(cls, tensor, device, normalize=False):\n quaternion, tx, ty, tz = torch.split(tensor, [4, 1, 1, 1], dim=-1)\n return cls(quaternion, [tx[..., 0], ty[..., 0], tz[..., 0]], device, normalize=normalize)\n\n def pre_compose(self, update):\n \"\"\"Return a new QuatAffine which applies the transformation update first.\n\n Args:\n update: Length-6 vector. 3-vector of x, y, and z such that the quaternion\n update is (1, x, y, z) and zero for the 3-vector is the identity\n quaternion. 
3-vector for translation concatenated.\n\n Returns:\n New QuatAffine object.\n \"\"\"\n vector_quaternion_update, x, y, z = torch.split(update, [3, 1, 1, 1], dim=-1)\n trans_update = [torch.squeeze(x, dim=-1),\n torch.squeeze(y, dim=-1),\n torch.squeeze(z, dim=-1)]\n\n new_quaternion = (self.quaternion +\n quat_multiply_by_vec(self.QUAT_MULTIPLY_BY_VEC,\n self.quaternion,\n vector_quaternion_update))\n\n trans_update = apply_rot_to_vec(self.rotation, trans_update)\n new_translation = [\n self.translation[0] + trans_update[0],\n self.translation[1] + trans_update[1],\n self.translation[2] + trans_update[2]]\n\n return QuatAffine(new_quaternion, new_translation, self.device)\n\n def apply_to_point(self, point, extra_dims=0):\n \"\"\"Apply affine to a point.\n\n Args:\n point: List of 3 tensors to apply affine.\n extra_dims: Number of dimensions at the end of the transformed_point\n shape that are not present in the rotation and translation. The most\n common use is rotation N points at once with extra_dims=1 for use in a\n network.\n\n Returns:\n Transformed point after applying affine.\n \"\"\"\n rotation = self.rotation\n translation = self.translation\n for _ in range(extra_dims):\n rotation = [[r[...,None] for r in row] for row in self.rotation]\n translation = [t[...,None] for t in self.translation]\n\n rot_point = apply_rot_to_vec(rotation, point)\n return [\n rot_point[0] + translation[0],\n rot_point[1] + translation[1],\n rot_point[2] + translation[2]]\n\n def invert_point(self, transformed_point, extra_dims=0):\n \"\"\"Apply inverse of transformation to a point.\n\n Args:\n transformed_point: List of 3 tensors to apply affine\n extra_dims: Number of dimensions at the end of the transformed_point\n shape that are not present in the rotation and translation. The most\n common use is rotation N points at once with extra_dims=1 for use in a\n network.\n\n Returns:\n Transformed point after applying affine.\n \"\"\"\n rotation = self.rotation\n translation = self.translation\n for _ in range(extra_dims):\n rotation = [[r[...,None] for r in row] for row in self.rotation]\n translation = [t[...,None] for t in self.translation]\n\n rot_point = [\n transformed_point[0] - translation[0],\n transformed_point[1] - translation[1],\n transformed_point[2] - translation[2]]\n\n return apply_inverse_rot_to_vec(rotation, rot_point)\n\n def __repr__(self):\n return 'QuatAffine(%r, %r)' % (self.quaternion, self.translation)\n\n\ndef _multiply(a, b):\n return torch.stack([\n torch.stack([a[0][0]*b[0][0] + a[0][1]*b[1][0] + a[0][2]*b[2][0],\n a[0][0]*b[0][1] + a[0][1]*b[1][1] + a[0][2]*b[2][1],\n a[0][0]*b[0][2] + a[0][1]*b[1][2] + a[0][2]*b[2][2]]),\n\n torch.stack([a[1][0]*b[0][0] + a[1][1]*b[1][0] + a[1][2]*b[2][0],\n a[1][0]*b[0][1] + a[1][1]*b[1][1] + a[1][2]*b[2][1],\n a[1][0]*b[0][2] + a[1][1]*b[1][2] + a[1][2]*b[2][2]]),\n\n torch.stack([a[2][0]*b[0][0] + a[2][1]*b[1][0] + a[2][2]*b[2][0],\n a[2][0]*b[0][1] + a[2][1]*b[1][1] + a[2][2]*b[2][1],\n a[2][0]*b[0][2] + a[2][1]*b[1][2] + a[2][2]*b[2][2]])])\n\n\ndef make_canonical_transform(n_xyz, ca_xyz, c_xyz):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Returns translation and rotation matrices to canonicalize residue atoms.\n\n Note that this method does not take care of symmetries. If you provide the\n atom positions in the non-standard way, the N atom will end up not at\n [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. 
You\n need to take care of such cases in your code.\n\n Args:\n n_xyz: An array of shape [batch, 3] of nitrogen xyz coordinates.\n ca_xyz: An array of shape [batch, 3] of carbon alpha xyz coordinates.\n c_xyz: An array of shape [batch, 3] of carbon xyz coordinates.\n\n Returns:\n A tuple (translation, rotation) where:\n translation is an array of shape [batch, 3] defining the translation.\n rotation is an array of shape [batch, 3, 3] defining the rotation.\n After applying the translation and rotation to all atoms in a residue:\n * All atoms will be shifted so that CA is at the origin,\n * All atoms will be rotated so that C is at the x-axis,\n * All atoms will be shifted so that N is in the xy plane.\n \"\"\"\n assert len(n_xyz.shape) == 2, n_xyz.shape\n assert n_xyz.shape[-1] == 3, n_xyz.shape\n assert n_xyz.shape == ca_xyz.shape == c_xyz.shape, (\n n_xyz.shape, ca_xyz.shape, c_xyz.shape)\n\n device = c_xyz.device\n # Place CA at the origin.\n translation = -ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n # Place C on the x-axis.\n c_x, c_y, c_z = [c_xyz[:, i] for i in range(3)]\n # Rotate by angle c1 in the x-y plane (around the z-axis).\n sin_c1 = -c_y / (1e-20 + c_x**2 + c_y**2)**0.5\n cos_c1 = c_x / (1e-20 + c_x**2 + c_y**2)**0.5\n zeros = torch.zeros_like(sin_c1, device=device)\n ones = torch.ones_like(sin_c1, device=device)\n\n # pylint: disable=bad-whitespace\n c1_rot_matrix = torch.stack([torch.stack([cos_c1, -sin_c1, zeros]),\n torch.stack([sin_c1, cos_c1, zeros]),\n torch.stack([zeros, zeros, ones])])\n\n # Rotate by angle c2 in the x-z plane (around the y-axis).\n sin_c2 = c_z / (1e-20 + c_x**2 + c_y**2 + c_z**2)**0.5\n cos_c2 = ((c_x**2 + c_y**2)**0.5) / (\n 1e-20 + c_x**2 + c_y**2 + c_z**2)**0.5\n c2_rot_matrix = torch.stack([torch.stack([cos_c2, zeros, sin_c2]),\n torch.stack([zeros, ones, zeros]),\n torch.stack([-sin_c2, zeros, cos_c2])])\n\n c_rot_matrix = _multiply(c2_rot_matrix, c1_rot_matrix)\n n_xyz = torch.stack(apply_rot_to_vec(c_rot_matrix, n_xyz, unstack=True)).T\n\n # Place N in the x-y plane.\n _, n_y, n_z = [n_xyz[:, i] for i in range(3)]\n # Rotate by angle alpha in the y-z plane (around the x-axis).\n sin_n = -n_z / (1e-20 + n_y**2 + n_z**2)**0.5\n cos_n = n_y / (1e-20 + n_y**2 + n_z**2)**0.5\n n_rot_matrix = torch.stack([torch.stack([ones, zeros, zeros]),\n torch.stack([zeros, cos_n, -sin_n]),\n torch.stack([zeros, sin_n, cos_n])])\n # pylint: enable=bad-whitespace\n\n return (translation,\n torch.permute(_multiply(n_rot_matrix, c_rot_matrix), [2, 0, 1]))\n\n\ndef make_transform_from_reference(n_xyz, ca_xyz, c_xyz):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Returns rotation and translation matrices to convert from reference.\n\n Note that this method does not take care of symmetries. If you provide the\n atom positions in the non-standard way, the N atom will end up not at\n [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. 
You\n need to take care of such cases in your code.\n\n Args:\n n_xyz: An array of shape [batch, 3] of nitrogen xyz coordinates.\n ca_xyz: An array of shape [batch, 3] of carbon alpha xyz coordinates.\n c_xyz: An array of shape [batch, 3] of carbon xyz coordinates.\n\n Returns:\n A tuple (rotation, translation) where:\n rotation is an array of shape [batch, 3, 3] defining the rotation.\n translation is an array of shape [batch, 3] defining the translation.\n After applying the translation and rotation to the reference backbone,\n the coordinates will approximately equal to the input coordinates.\n\n The order of translation and rotation differs from make_canonical_transform\n because the rotation from this function should be applied before the\n translation, unlike make_canonical_transform.\n \"\"\"\n translation, rotation = make_canonical_transform(n_xyz, ca_xyz, c_xyz)\n return torch.permute(rotation, (0, 2, 1)), -translation\n\n\ndef generate_new_affine(sequence_mask, device):\n num_residues = sequence_mask.size(0)\n quaternion = torch.tile(\n torch.tensor([1., 0., 0., 0.], device=device).reshape(1, 4),\n [num_residues, 1])\n\n translation = torch.zeros(num_residues, 3, device=device)\n return QuatAffine(quaternion, translation, device, unstack_inputs=True)\n\n\nclass FoldIteration(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n A single iteration of the main structure module loop.\n\n Jumper et al. (2021) Suppl. Alg. 20 \"StructureModule\" lines 6-21\n\n First, each residue attends to all residues using InvariantPointAttention.\n Then, we apply transition layers to update the hidden representations.\n Finally, we use the hidden representations to produce an update to the\n affine of each residue.\n \"\"\"\n\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n\n def init_parameters(self, activations,\n sequence_mask,\n update_affine,\n is_training,\n initial_act,\n static_feat_2d=None,\n aatype=None):\n dt = activations['act'].dtype\n c = self.config\n self.dropout1 = nn.Dropout()#p=c.dropout * (1-self.global_config.deterministic))\n self.dropout2 = nn.Dropout()\n\n n = activations['act'].size(-1)\n self.invariant_point_attention = InvariantPointAttention(self.config, self.global_config)\n self.attention_layer_norm = nn.LayerNorm(n, elementwise_affine=True, device=self.global_config.device).to(dt)\n\n final_init = 'zeros' if self.global_config.zero_init else 'linear'\n ch = n\n transitions = []\n for i in range(c.num_layer_in_transition):\n init = 'relu' if i < c.num_layer_in_transition - 1 else final_init\n transitions.append(makeLinear(ch, c.num_channel, dt, self.global_config.device, initializer=init))\n if i < c.num_layer_in_transition - 1: transitions.append(nn.ReLU(inplace=False))\n ch = c.num_channel\n self.transition = nn.Sequential(*transitions)\n self.transition_layer_norm = nn.LayerNorm(c.num_channel, elementwise_affine=True, device=self.global_config.device).to(dt)\n\n if update_affine:\n # This block corresponds to\n # Jumper et al. (2021) Alg. 
23 \"Backbone update\"\n affine_update_size = 6\n self.affine_update = makeLinear(c.num_channel, affine_update_size, dt, self.global_config.device, initializer=final_init)\n\n self.forward = self.go\n return self(activations,sequence_mask,update_affine,\n is_training,initial_act,static_feat_2d,aatype)\n\n def go(self, activations, sequence_mask, update_affine, is_training,\n initial_act, static_feat_2d=None, aatype=None):\n self.dropout1.p = int(not self.global_config.deterministic) * self.config.dropout * int(is_training)\n self.dropout2.p = self.dropout1.p\n affine = QuatAffine.from_tensor(activations['affine'], self.global_config.device)\n\n act = activations['act']\n\n # Attention\n residual = self.invariant_point_attention(act.clone(), static_feat_2d, sequence_mask, affine)\n act += residual\n dropped = self.dropout1(act)\n act = self.attention_layer_norm(dropped)\n\n # Transition\n residual = self.transition(act.clone())\n act += residual\n dropped = self.dropout2(act)\n act = self.transition_layer_norm(dropped)\n\n if update_affine:\n # This block corresponds to\n # Jumper et al. (2021) Alg. 23 \"Backbone update\"\n # Affine update\n affine_update = self.affine_update(act.clone())\n affine = affine.pre_compose(affine_update)\n \n outputs = {'affine': affine.to_tensor()}\n affine = affine.detach_rot()\n\n return {'act': act, 'affine': affine.to_tensor()}, outputs\n\n\nclass GenerateAffines(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Generate predicted affines for a single chain.\n\n Jumper et al. (2021) Suppl. Alg. 20 \"StructureModule\"\n\n This is the main part of the structure module - it iteratively applies\n folding to produce a set of predicted residue positions.\n \"\"\"\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n\n def init_parameters(self, representations, batch, is_training):\n dt = representations['single'].dtype\n s = representations['single'].size(-1)\n self.initial_projection = makeLinear(s, self.config.num_channel, dt, self.global_config.device)\n self.fold_iteration = FoldIteration(self.config, self.global_config)\n\n self.single_layer_norm = nn.LayerNorm(s, elementwise_affine=True, device=self.global_config.device).to(dt)\n self.pair_layer_norm = nn.LayerNorm(representations['pair'].size(-1), elementwise_affine=True, device=self.global_config.device).to(dt)\n\n self.forward = self.go\n return self(representations, batch, is_training)\n\n def go(self, representations, batch, is_training):\n \"\"\"\n Args:\n representations: Representations dictionary.\n batch: Batch dictionary.\n config: Config for the structure module.\n global_config: Global config.\n is_training: Whether the model is being trained.\n safe_key: A prng.SafeKey object that wraps a PRNG key.\n\n Returns:\n A dictionary containing residue affines and sidechain positions.\n \"\"\"\n c = self.config\n sequence_mask = batch['seq_mask'][:, None]\n\n act = self.single_layer_norm(representations['single'])\n initial_act = act\n\n act = self.initial_projection(act)\n affine = generate_new_affine(sequence_mask, self.global_config.device)\n\n assert len(batch['seq_mask'].shape) == 1\n\n activations = {'act': act, 'affine': affine.to_tensor()}\n act_2d = self.pair_layer_norm(representations['pair'])\n\n outputs = []\n for _ in range(c.num_layer):\n # activations have detached grads for rot, and 
output \n activations, output = self.fold_iteration(\n activations,\n initial_act=initial_act,\n static_feat_2d=act_2d,\n sequence_mask=sequence_mask,\n update_affine=True,\n is_training=is_training,\n aatype=batch['aatype'])\n outputs.append(output)\n # outputs.append({k:torch.stack(v) for k,v in output.items()})\n output = {k:torch.stack([o[k] for o in outputs]) for k in output}\n # output = jax.tree_map(lambda *x: jnp.stack(x), *outputs)\n # Include the activations in the output dict for use by the LDDT-Head.\n output['act'] = activations['act']\n return output\n\n\n# Array of 3-component vectors, stored as individual array for\n# each component.\nVecs = collections.namedtuple('Vecs', ['x', 'y', 'z'])\n\n# Array of 3x3 rotation matrices, stored as individual array for\n# each component.\nRots = collections.namedtuple('Rots', ['xx', 'xy', 'xz',\n 'yx', 'yy', 'yz',\n 'zx', 'zy', 'zz'])\n# Array of rigid 3D transformations, stored as array of rotations and\n# array of translations.\nRigids = collections.namedtuple('Rigids', ['rot', 'trans'])\n\ndef rigids_from_quataffine(a: QuatAffine) -> Rigids:\n \"\"\"Converts QuatAffine object to the corresponding Rigids object.\"\"\"\n return Rigids(Rots(*tree.flatten(a.rotation)),\n Vecs(*a.translation))\n\ndef vecs_squared_distance(v1: Vecs, v2: Vecs):\n \"\"\"Computes squared euclidean difference between 'v1' and 'v2'.\"\"\"\n return (v1.x - v2.x)**2 + (v1.y - v2.y)**2 + (v1.z - v2.z)**2\n\ndef vecs_add(v1: Vecs, v2: Vecs) -> Vecs:\n \"\"\"Add two vectors 'v1' and 'v2'.\"\"\"\n return Vecs(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z)\n\ndef rigids_mul_vecs(r: Rigids, v: Vecs) -> Vecs:\n \"\"\"Apply rigid transforms 'r' to points 'v'.\"\"\"\n return vecs_add(rots_mul_vecs(r.rot, v), r.trans)\n\ndef rots_mul_vecs(m: Rots, v: Vecs) -> Vecs:\n \"\"\"Apply rotations 'm' to vectors 'v'.\"\"\"\n return Vecs(m.xx * v.x + m.xy * v.y + m.xz * v.z,\n m.yx * v.x + m.yy * v.y + m.yz * v.z,\n m.zx * v.x + m.zy * v.y + m.zz * v.z)\n\n\ndef invert_rots(m: Rots) -> Rots:\n \"\"\"Computes inverse of rotations 'm'.\"\"\"\n return Rots(m.xx, m.yx, m.zx,\n m.xy, m.yy, m.zy,\n m.xz, m.yz, m.zz)\n\n\ndef invert_rigids(r: Rigids) -> Rigids:\n \"\"\"Computes group inverse of rigid transformations 'r'.\"\"\"\n inv_rots = invert_rots(r.rot)\n t = rots_mul_vecs(inv_rots, r.trans)\n inv_trans = Vecs(-t.x, -t.y, -t.z)\n return Rigids(inv_rots, inv_trans)\n\n\ndef frame_aligned_point_error(\n pred_frames, # shape (num_frames)\n target_frames, # shape (num_frames)\n frames_mask, # shape (num_frames)\n pred_positions, # shape (num_positions)\n target_positions, # shape (num_positions)\n positions_mask, # shape (num_positions)\n length_scale,\n l1_clamp_distance,\n epsilon=1e-4): # shape ()\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Measure point error under different alignments.\n\n Jumper et al. (2021) Suppl. Alg. 
28 \"computeFAPE\"\n\n Computes error between two structures with B points under A alignments derived\n from the given pairs of frames.\n Args:\n pred_frames: num_frames reference frames for 'pred_positions'.\n target_frames: num_frames reference frames for 'target_positions'.\n frames_mask: Mask for frame pairs to use.\n pred_positions: num_positions predicted positions of the structure.\n target_positions: num_positions target positions of the structure.\n positions_mask: Mask on which positions to score.\n length_scale: length scale to divide loss by.\n l1_clamp_distance: Distance cutoff on error beyond which gradients will\n be zero.\n epsilon: small value used to regularize denominator for masked average.\n Returns:\n Masked Frame Aligned Point Error.\n \"\"\"\n\n # pred_frames and pred_positions arrays may have a batch dim,\n # if so will need to repeat along other arrays\n assert pred_frames.rot.xx.ndim in {1,2}\n assert target_frames.rot.xx.ndim == 1\n assert frames_mask.ndim == 1, frames_mask.ndim\n assert pred_positions.x.ndim in {1,2}\n assert target_positions.x.ndim == 1\n assert positions_mask.ndim == 1\n\n s = pred_frames.rot.xx.shape[:-1]\n mapper = lambda C,o,f: C(**{k:f(v) for k,v in o._asdict().items()})\n\n # Compute array of predicted positions in the predicted frames.\n ir = invert_rigids(pred_frames)\n local_pred_pos = rigids_mul_vecs(\n Rigids(mapper(Rots, ir.rot, lambda a:a[..., None]), \n mapper(Vecs, ir.trans, lambda a:a[..., None])),\n mapper(Vecs, pred_positions, lambda a:a[..., None, :])\n )\n\n # Compute array of target positions in the target frames.\n irt = invert_rigids(target_frames)\n local_target_pos = rigids_mul_vecs(\n Rigids(mapper(Rots, irt.rot, lambda a:a[..., None].repeat(*s, 1, 1)),\n mapper(Vecs, irt.trans, lambda a:a[..., None].repeat(*s, 1, 1))),\n mapper(Vecs, target_positions, lambda a:a[..., None, :].repeat(*s, 1, 1))\n )\n\n # Compute errors between the structures.\n error_dist = (vecs_squared_distance(local_pred_pos, local_target_pos) + epsilon)**0.5\n\n if l1_clamp_distance:\n error_dist = torch.clip(error_dist, 0, l1_clamp_distance)\n\n normed_error = error_dist / length_scale\n\n extra_dims = [None]*(len(normed_error.shape)-2)\n normed_error *= frames_mask[extra_dims+[slice(None),None]]\n normed_error *= positions_mask[extra_dims+[None,slice(None)]]\n\n normalization_factor = (frames_mask.sum(-1) * positions_mask.sum(-1))\n return normed_error.sum(dim=(-2, -1)) / (epsilon + normalization_factor)\n\n\ndef backbone_loss(ret, batch, value, config, device):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Backbone FAPE Loss.\n\n Jumper et al. (2021) Suppl. Alg. 
20 \"StructureModule\" line 17\n\n Args:\n ret: Dictionary to write outputs into, needs to contain 'loss'.\n batch: Batch, needs to contain 'backbone_affine_tensor',\n 'backbone_affine_mask'.\n value: Dictionary containing structure module output, needs to contain\n 'traj', a trajectory of rigids.\n config: Configuration of loss, should contain 'fape.clamp_distance' and\n 'fape.loss_unit_distance'.\n \"\"\"\n affine_trajectory = QuatAffine.from_tensor(value['traj'], device)\n rigid_trajectory = rigids_from_quataffine(affine_trajectory)\n\n gt_affine = QuatAffine.from_tensor(\n batch['backbone_affine_tensor'], device)\n gt_rigid = rigids_from_quataffine(gt_affine)\n backbone_mask = batch['backbone_affine_mask']\n \n args = (rigid_trajectory, gt_rigid, backbone_mask, rigid_trajectory.trans, gt_rigid.trans, backbone_mask)\n \n fape_loss = frame_aligned_point_error(\n *args,\n l1_clamp_distance=config.fape.clamp_distance,\n length_scale=config.fape.loss_unit_distance\n )\n\n if 'use_clamped_fape' in batch:\n # Jumper et al. (2021) Suppl. Sec. 1.11.5 \"Loss clamping details\"\n use_clamped_fape = batch['use_clamped_fape'].float()\n fape_loss_unclamped = frame_aligned_point_error(\n *args,\n l1_clamp_distance=None,\n length_scale=config.fape.loss_unit_distance\n )\n fape_loss = (fape_loss * use_clamped_fape +\n fape_loss_unclamped * (1 - use_clamped_fape))\n\n ret['fape'] = fape_loss[-1]\n ret['loss'] += fape_loss.mean()\n\nclass StructureModule(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n StructureModule as a network head.\n\n Jumper et al. (2021) Suppl. Alg. 20 \"StructureModule\"\n \"\"\"\n\n def __init__(self, config, global_config, compute_loss=True):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.compute_loss = compute_loss\n self.forward = self.init_parameters\n\n def init_parameters(self, representations, batch, is_training):\n self.generate_affines = GenerateAffines(self.config, self.global_config)\n self.scale = torch.tensor([1.] * 4 + [self.config.position_scale] * 3, \n device=self.global_config.device)\n self.forward = self.go\n return self(representations, batch, is_training)\n\n def go(self, representations, batch, is_training):\n ret = {}\n \n # print('repres:')\n # print({k+', '+str(v.shape) for k,v in representations.items()})\n # print('batch:')\n # print({k+', '+str(v.shape) for k,v in batch.items()})\n\n output = self.generate_affines(representations, batch, is_training)\n representations['structure_module'] = output['act']\n ret['traj'] = output['affine'] * self.scale\n ret['final_affines'] = ret['traj'][-1]\n\n ret['final_frame_pos'] = ret['traj'][-1][...,-3:]\n\n if self.compute_loss:\n return ret\n else:\n no_loss_features = ['final_frame_pos']#['final_atom_positions', 'final_atom_mask']\n no_loss_ret = {k: ret[k] for k in no_loss_features}\n return no_loss_ret\n\n def loss(self, value, batch):\n ret = {'loss': 0.}\n backbone_loss(ret, batch, value, self.config, self.global_config.device)\n return ret\n\n\nclass MaskedMsaHead(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Head to predict MSA at the masked locations.\n\n The MaskedMsaHead employs a BERT-style objective to reconstruct a masked\n version of the full MSA, based on a linear projection of\n the MSA representation.\n Jumper et al. (2021) Suppl. Sec. 
1.9.9 \"Masked MSA prediction\"\n \"\"\"\n\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n\n def init_parameters(self, representations, batch, is_training):\n dt = representations['msa'].dtype\n gc = self.global_config\n init = 'zeros' if gc.zero_init else 'linear'\n h = representations['msa'].size(-1)\n self.logits = makeLinear(h, gc.msa_n_token, dt, gc.device, initializer=init)\n self.rnalogits = makeLinear(h, gc.rna_msa_n_token, dt, gc.device, initializer=init)\n\n if is_training:\n self.smxe = nn.CrossEntropyLoss(reduction='none')\n\n self.forward = self.go\n return self(representations, batch, is_training)\n\n def go(self, representations, batch, is_training):\n \"\"\"\n Arguments:\n representations: Dictionary of representations, must contain:\n * 'msa': MSA representation, shape [N_seq, N_res, c_m].\n batch: Batch, unused.\n is_training: Whether the module is in training mode.\n\n Returns:\n Dictionary containing:\n * 'logits': logits of shape [N_seq, N_res, N_aatype] with\n (unnormalized) log probabilies of predicted aatype at position.\n \"\"\"\n layer = self.rnalogits if 'rna' in batch else self.logits\n del batch\n logits = layer(representations['msa'])\n return dict(logits=logits)\n\n def loss(self, value, batch):\n s = batch['true_msa'].shape\n l = value['logits'].reshape(-1,value['logits'].size(-1))\n tr = batch['true_msa'].reshape(-1).long()\n errors = self.smxe(l, tr).reshape(s)\n\n loss = (torch.sum(errors * batch['bert_mask'], dim=(-2, -1)) /\n (1e-8 + torch.sum(batch['bert_mask'], dim=(-2, -1))))\n return {'loss': loss}\n\n\ndef _distogram_log_loss(logits, bin_edges, batch, num_bins):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Log loss of a distogram.\"\"\"\n\n assert len(logits.shape) == 3\n positions = batch['pseudo_beta']\n mask = batch['pseudo_beta_mask']\n\n assert positions.shape[-1] == 3\n\n sq_breaks = bin_edges**2\n dist2 = ((positions[...,None,:] - positions[...,None,:,:])**2).sum(-1)[...,None]\n true_bins = (dist2 > sq_breaks).sum(-1)\n\n s = true_bins.shape\n smxe = nn.CrossEntropyLoss(reduction='none')\n errors = smxe(logits.reshape(-1, logits.size(-1)), true_bins.reshape(-1).long()).reshape(s)\n square_mask = mask[...,None,:] * mask[...,None]\n\n avg_error = (\n torch.sum(errors * square_mask, dim=(-2, -1)) /\n (1e-6 + torch.sum(square_mask, dim=(-2, -1))))\n dist2 = dist2[..., 0]\n return dict(loss=avg_error, true_dist=(1e-6 + dist2)**0.5)\n\n\nclass DistogramHead(nn.Module):\n \"\"\" \n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n Head to predict a distogram.\n\n Jumper et al. (2021) Suppl. Sec. 
1.9.8 \"Distogram prediction\"\n \"\"\"\n\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n\n def init_parameters(self, representations, batch, is_training):\n dt = representations['pair'].dtype\n init = 'zeros' if self.global_config.zero_init else 'linear'\n self.half_logits = makeLinear(representations['pair'].size(-1), \n self.config.num_bins, dt, self.global_config.device, initializer=init)\n self.breaks = torch.linspace(self.config.first_break, self.config.last_break,\n self.config.num_bins - 1, device=self.global_config.device)\n\n self.forward = self.go\n return self(representations, batch, is_training)\n\n def go(self, representations, batch, is_training):\n \"\"\"\n Arguments:\n representations: Dictionary of representations, must contain:\n * 'pair': pair representation, shape [N_res, N_res, c_z].\n batch: Batch, unused.\n is_training: Whether the module is in training mode.\n\n Returns:\n Dictionary containing:\n * logits: logits for distogram, shape [N_res, N_res, N_bins].\n * bin_breaks: array containing bin breaks, shape [N_bins - 1,].\n \"\"\"\n half_logits = self.half_logits(representations['pair'])\n\n logits = half_logits + torch.swapaxes(half_logits, -2, -3)\n\n return dict(logits=logits, bin_edges=self.breaks)\n\n def loss(self, value, batch):\n dll = _distogram_log_loss(value['logits'], value['bin_edges'],\n batch, self.config.num_bins)\n return dll\n\n\nclass AlphaFoldIteration(nn.Module):\n \"\"\"\n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n \n A single recycling iteration of AlphaFold architecture.\n\n Computes ensembled (averaged) representations from the provided features.\n These representations are then passed to the various heads\n that have been requested by the configuration file. Each head also returns a\n loss which is combined as a weighted sum to produce the total loss.\n\n Jumper et al. (2021) Suppl. Alg. 
2 \"Inference\" lines 3-22\n \"\"\"\n\n def __init__(self, config, global_config):\n super().__init__()\n self.config, self.global_config = config, global_config\n self.forward = self.init_parameters\n \n def init_parameters(self, ensembled_batch, non_ensembled_batch, is_training,\n compute_loss=False, ensemble_representations=False):\n \n self.evoformer = EmbeddingsAndEvoformer(\n self.config.embeddings_and_evoformer, self.global_config)\n\n self.head_cfg = {}\n self.heads = nn.ModuleDict()\n for head_name, head_config in sorted(self.config.heads.items()):\n if not head_config.weight:\n continue # Do not instantiate zero-weight heads.\n\n head_namespace = {\n 'masked_msa': MaskedMsaHead,\n 'distogram': DistogramHead,\n 'structure_module': functools.partial(StructureModule, compute_loss=True),\n }\n self.head_cfg[head_name] = head_config\n self.heads[head_name] = head_namespace[head_name](head_config, self.global_config)\n\n self.forward = self.go\n return self(ensembled_batch, non_ensembled_batch, is_training,\n compute_loss=compute_loss, ensemble_representations=ensemble_representations)\n\n def go(self, ensembled_batch, non_ensembled_batch, is_training,\n compute_loss=False, ensemble_representations=False):\n '''\n if not ensemble_representations:\n ensembled_batch : is unbatched when it passes through the embeddings and evoformer\n non_ensembled_batch : similarly the representations are unbatched when passing through heads\n '''\n\n num_ensemble = ensembled_batch['seq_length'].shape[0]\n\n if not ensemble_representations:\n # print(ensembled_batch['seq_length'].shape)\n assert ensembled_batch['seq_length'].shape[0] == 1\n\n def slice_batch(i):\n b = {k: v[i] for k, v in ensembled_batch.items()}\n b.update(non_ensembled_batch)\n return b\n\n # Compute representations for each batch element and average.\n # print('ensembled_batch:')\n # print({k+', '+str(v.shape) for k,v in ensembled_batch.items()})\n \n batch0 = slice_batch(0)\n representations = self.evoformer(batch0, is_training)\n\n # MSA representations are not ensembled so\n # we don't pass tensor into the loop.\n msa_representation = representations['msa']\n del representations['msa']\n\n # Average the representations (except MSA) over the batch dimension.\n if ensemble_representations:\n\n for i in range(1, num_ensemble):\n representations_update = self.evoformer(slice_batch(i), is_training)\n for k in representations:\n representations[k] += representations_update[k]\n \n for k in representations:\n if k != 'msa':\n representations[k] /= num_ensemble\n\n representations['msa'] = msa_representation\n batch = batch0 # We are not ensembled from here on.\n\n total_loss = 0.\n ret = {}\n ret['representations'] = representations\n\n def loss(module, head_config, ret, name, filter_ret=True):\n loss_output = module.loss(ret[name] if filter_ret else ret, batch)\n ret[name].update(loss_output)\n return head_config.weight * ret[name]['loss']\n\n loss_rec = {}\n for name, module in self.heads.items():\n # Skip PredictedLDDTHead and PredictedAlignedErrorHead until\n # StructureModule is executed.\n if name not in ('predicted_lddt', 'predicted_aligned_error'):\n ret[name] = module(representations, batch, is_training)\n if compute_loss:\n l = loss(module, self.head_cfg[name], ret, name)\n total_loss += l\n loss_rec[name] = l.data.item()\n\n if compute_loss: \n return ret, total_loss, loss_rec\n return ret\n\ndef tree_map(fn, tree_):\n return {k:tree_map(fn, b) if type(b)==dict else fn(b) for k,b in tree_.items()}\n\nclass 
AlphaFold(pl.LightningModule):\n \"\"\" \n DeepMind AlphaFold code https://github.com/deepmind/alphafold\n ported to pytorch by Louis Robinson (21 Aug 2021).\n\n AlphaFold model with recycling.\n\n Jumper et al. (2021) Suppl. Alg. 2 \"Inference\"\n \"\"\"\n\n def __init__(self, config, is_training, compute_loss=False,\n ensemble_representations=False, return_representations=False,):\n super().__init__()\n self.config = config['model']\n self.global_config = config['model'].global_config\n self.data_config = config['data']\n self.alphafold_iteration = AlphaFoldIteration(self.config, self.global_config)\n self.is_training = is_training\n self.compute_loss = compute_loss\n self.ensemble_representations = ensemble_representations\n self.return_representations = return_representations\n \n def forward(self, batch):\n \"\"\"\n Arguments:\n batch: Dictionary with inputs to the AlphaFold model.\n is_training: Whether the system is in training or inference mode.\n compute_loss: Whether to compute losses (requires extra features\n to be present in the batch and knowing the true structure).\n ensemble_representations: Whether to use ensembling of representations.\n return_representations: Whether to also return the intermediate\n representations.\n\n Returns:\n When compute_loss is True:\n a tuple of loss and output of AlphaFoldIteration.\n When compute_loss is False:\n just output of AlphaFoldIteration.\n\n The output of AlphaFoldIteration is a nested dictionary containing\n predictions from the various heads.\n \"\"\"\n batch_size, num_residues = batch['aatype'].shape\n\n def get_prev(ret):\n return {\n # 'prev_pos': ret['structure_module']['final_atom_positions'].detach(),\n 'prev_pos': ret['structure_module']['final_frame_pos'].detach(),\n 'prev_msa_first_row': ret['representations']['msa_first_row'].detach(),\n 'prev_pair': ret['representations']['pair'].detach(),\n }\n\n def call_af_iter(prev, recycle_idx, comp_loss, num_iter):\n if self.config.resample_msa_in_recycling:\n num_ensemble = batch_size // (num_iter + 1)\n \n def slice_recycle_idx(x):\n # take the next 'num_ensemble' length slice from the first dim of each array in dict\n return x[recycle_idx * num_ensemble:(1+recycle_idx) * num_ensemble]\n \n ensembled_batch = tree_map(slice_recycle_idx, batch)\n else:\n # num_ensemble = batch_size\n ensembled_batch = batch\n args = [ensembled_batch, prev, self.is_training]\n kwargs = {'compute_loss':comp_loss, 'ensemble_representations':self.ensemble_representations}\n if not comp_loss:\n with torch.no_grad():\n outp = self.alphafold_iteration(*args, **kwargs)\n return outp\n return self.alphafold_iteration(*args, **kwargs)\n\n if self.config.num_recycle:\n emb_config = self.config.embeddings_and_evoformer\n prev = {\n # 'prev_pos': torch.zeros(num_residues, residue_constants.atom_type_num, 3),\n 'prev_pos': torch.zeros(num_residues, 3, device=self.global_config.device),\n 'prev_msa_first_row': torch.zeros(num_residues, emb_config.msa_channel, device=self.global_config.device),\n 'prev_pair': torch.zeros(num_residues, num_residues, emb_config.pair_channel, device=self.global_config.device),\n }\n\n if 'num_iter_recycling' in batch:\n # Training time: num_iter_recycling is in batch.\n # The value for each ensemble batch is the same, so arbitrarily taking\n # 0-th.\n num_iter = batch['num_iter_recycling'][0]\n\n # Add insurance that we will not run more\n # recyclings than the model is configured to run.\n # num_iter = min(num_iter, self.config.num_recycle)\n else:\n # Eval mode or tests: use the 
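# The recycling control flow of forward() above in miniature: every pass but
# the last runs under no_grad, so only the final iteration is differentiated.
import torch
net = torch.nn.Linear(8, 8)
x, prev = torch.randn(8), torch.zeros(8)
num_iter = 3
for _ in range(num_iter - 1):
    with torch.no_grad():
        prev = net(x + prev)    # no gradient flows through recycled passes
out = net(x + prev)             # final pass keeps the graph
out.sum().backward()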
maximum number of iterations.\n num_iter = self.config.num_recycle\n \n for i in range(num_iter-1):\n prev = get_prev(call_af_iter(prev, i, False, num_iter))\n\n else:\n prev = {}\n num_iter = 0\n\n ret = call_af_iter(prev, num_iter, self.compute_loss, num_iter)\n if self.compute_loss:\n ret = ret[0], [ret[1]], ret[2]\n\n if not self.return_representations:\n del (ret[0] if self.compute_loss else ret)['representations'] # pytype: disable=unsupported-operands\n return ret\n\n def track_codes(self, track, freq, rec_t_coords_every=5, train_coord_buffer_size=50):\n self.n_fails = 0\n self.train_set_metric_codes = track['train_metrics']\n self.train_metrics = []\n self.v_coords = {c:[] for c in track['val_coords_to_track']}\n self.t_coords = {c:[] for c in track['train_coords_to_track']}\n self.track_all = []\n self.dircts = {\n 'val_coords': ('/val_coords', 'coords', 0),\n 'train_coords': ('/train_coords', 'coords', 0),\n 'train': ('/train', 'losses', 0),\n 'train_metrics': ('/train_metrics', 'metrics', 0),\n 'validation': ('/validation', 'metrics', 0),\n }\n self.log_freq = freq\n self.rec_t_coords_every = rec_t_coords_every\n self.train_coord_buffer_size = train_coord_buffer_size\n self.sup = SVDSuperimposer()\n\n self.epo = 0\n \n def save_data(self, key, data):\n for _,(d,_,_) in self.dircts.items():\n if not os.path.isdir(self.trainer.log_dir + d): \n os.mkdir(self.trainer.log_dir + d)\n\n _dir, name, curr_ix = self.dircts[key]\n f = open('%s/%s%d.json'%(self.trainer.log_dir + _dir, name, curr_ix), 'w')\n f.write(json.dumps(data))\n f.close()\n self.dircts[key] = (_dir, name, curr_ix+1)\n \n def tidy_data(self):\n ''' go through data dirs and open, collect, jsonify, compress, save, delete old files '''\n # left overs..\n coords = {code:[(i,c.tolist()) for i,c in tenlist] for code, tenlist in self.v_coords.items()}\n self.save_data('val_coords', coords)\n convert = lambda lt: [(e,i,c.tolist(),evd.tolist()) for e,i,c,evd in lt]\n coords = {code:convert(tenlist) for code, tenlist in self.t_coords.items()}\n self.save_data('train_coords', coords)\n\n print('Tidying logs...', end=' ')\n all_filenames = []\n\n def collect_coords(scrds, key):\n coord_dir, name, curr_ix = self.dircts[key]\n crds = {c:[] for c in scrds}\n filenames = ['%s/%s%d.json'%(self.trainer.log_dir + coord_dir, name, i) for i in range(curr_ix)]\n for fn in filenames:\n saved = json.loads(open(fn, 'r').read())\n for c in scrds:\n crds[c].extend(saved[c])\n all_filenames.extend(filenames)\n return crds\n \n def collect_dicts(key, extend=True):\n train_dir, name, curr_ix = self.dircts[key]\n lst = []\n filenames = ['%s/%s%d.json'%(self.trainer.log_dir + train_dir, name, i) for i in range(curr_ix)]\n for fn in filenames:\n if extend:\n lst.extend( json.loads(open(fn, 'r').read()) )\n else:\n lst.append( json.loads(open(fn, 'r').read()) )\n all_filenames.extend(filenames)\n return lst\n\n all_records = {\n 'train' : collect_dicts('train'),\n 'train_metrics' : collect_dicts('train_metrics', extend=False),\n 'valid_epochs' : collect_dicts('validation', extend=False),\n 'val_coords' : collect_coords(self.v_coords, 'val_coords'),\n 'train_coords' : collect_coords(self.t_coords, 'train_coords')\n }\n\n comp = gzip.compress(bytes(json.dumps(all_records), encoding='utf-8'))\n f = gzip.open(self.trainer.log_dir + '/records.gz', 'wb')\n f.write(comp)\n f.close()\n for fn in all_filenames: os.remove(fn)\n print('Done.')\n\n def training_step(self, train_batch, batch_idx):\n # pass in the chain-code\n code = train_batch['code']\n # del 
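# The gzip+json persistence used by tidy_data() above, as a round trip (the
# path and record contents are made up for illustration). gzip.open already
# compresses on write, so compressing once is sufficient:
import gzip, json
records = {'train': [[0, 'chain_code', 0.5]], 'valid_epochs': []}
with gzip.open('/tmp/records.gz', 'wb') as f:
    f.write(json.dumps(records).encode('utf-8'))
with gzip.open('/tmp/records.gz', 'rb') as f:
    assert json.loads(f.read().decode('utf-8')) == records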
train_batch['code']\n\n out, [loss], losses = self.forward(train_batch)\n\n # add in other losses, e.g. FAPE and any metrics.\n self.track_all.append((batch_idx, code, loss.item(),\n losses['masked_msa'], losses['distogram'], \n losses['structure_module']))\n\n if self.epo%self.rec_t_coords_every==0 and code in self.t_coords:\n coords = out['structure_module']['final_frame_pos'].data#.detach()\n logits = out['distogram']['logits'].data\n bins = out['distogram']['bin_edges'].data\n # reconstruct dist matrix from logits and bins\n p = torch.exp(logits)\n p /= p.sum(-1)[...,None]\n diff = bins[1]-bins[0]\n b0 = torch.tensor([bins[0] - diff], device=self.global_config.device)\n bins = torch.cat((b0, bins), dim=0) - diff*0.5\n # in the distogram head- the one-hot enc is based on (dist > breaks).sum(-1)\n # so we must reverse the breaks (bins) so that it multiplies the appropriate indices\n evo_disto = torch.einsum('ijk,k->ij', logits, torch.flip(bins, (0,))).data\n\n self.t_coords[code].append((self.epo, batch_idx, coords, evo_disto))\n \n if sum(len(v) for _,v in self.t_coords.items()) >= self.train_coord_buffer_size:\n convert = lambda lt: [(e,i,c.tolist(),evd.tolist()) for e,i,c,evd in lt]\n coords = {code:convert(tenlist) for code, tenlist in self.t_coords.items()}\n # save data\n self.save_data('train_coords', coords)\n # reset the dict\n self.t_coords = {c:[] for c in self.t_coords}\n\n if len(self.track_all)>=self.log_freq:\n # save the losses etc.\n self.save_data('train', self.track_all)\n # reset the list\n self.track_all = []\n\n if code in self.train_set_metric_codes:\n try:\n tm, rmsd, gdt, lddt = get_metrics(train_batch, out, self.sup, self.global_config.device)\n except:\n tm, rmsd, gdt, lddt = [None]*4\n self.n_fails += 1\n self.train_metrics.append((batch_idx, code, tm, rmsd, gdt, lddt))\n\n if self.recall is not None:\n # batch_idx resets on each epoch\n if batch_idx//self.recall==0:\n # call scheduler\n # self.epo += int(batch_idx < self.pbatch_idx)\n self.pbatch_idx = batch_idx\n prop = float(batch_idx)/self.num_data_per_epoch\n t = self.epo + prop\n self.lr_schedulers().step(t)\n\n self.log('train_loss', loss)\n return loss\n\n def training_epoch_end(self, train_out):\n print('Failed to record %d codes metrics in training step'%self.n_fails)\n self.n_fails = 0\n self.save_data('train_metrics', self.train_metrics)\n self.train_metrics = []\n self.epo += 1\n\n def validation_step(self, val_batch, batch_idx):\n code = val_batch['code']\n\n out, [loss], losses = self.forward(val_batch)\n self.log('val_loss', loss)\n\n coords = out['structure_module']['final_frame_pos'].data#.detach()\n\n if code in self.v_coords:\n # this is the backbone coords and everythin needed to plot\n self.v_coords[code].append((batch_idx, coords))\n\n # this event happens once near the end of a deterministic validation epoch\n if all(len(v)>0 for _,v in self.v_coords.items()):\n coords = {code:[(i,c.tolist()) for i,c in tenlist] for code, tenlist in self.v_coords.items()}\n # save data\n self.save_data('val_coords', coords)\n # reset the dict\n self.v_coords = {c:[] for c in self.v_coords}\n \n try:\n tm, rmsd, gdt, lddt = get_metrics(val_batch, out, self.sup, self.global_config.device)\n except:\n tm, rmsd, gdt, lddt = [None]*4\n print('%s failed to record metrics in validation step'%code)\n\n # self.val_metrics.append((code, tm, rmsd, gdt, lddt, loss.item(),\n # losses['masked_msa'], losses['distogram'], losses['structure_module']))\n return (code, tm, rmsd, gdt, lddt, loss.item(), 
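# The distance-map reconstruction in training_step above boils down to a
# softmax over distogram bins followed by a probability-weighted sum of bin
# centres; a restated sketch on dummy shapes (the bin range is illustrative):
import torch
n_res, n_bins = 4, 64
logits = torch.randn(n_res, n_res, n_bins)
edges = torch.linspace(2.0, 22.0, n_bins - 1)
diff = edges[1] - edges[0]
centres = torch.cat((edges, edges[-1:] + diff)) - 0.5 * diff  # one centre per bin
probs = logits.softmax(-1)
expected_dist = torch.einsum('ijk,k->ij', probs, centres)     # (n_res, n_res)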
losses['masked_msa'], \n losses['distogram'], losses['structure_module'])\n \n def validation_epoch_end(self, val_outs):\n # save the validation metrics\n self.save_data('validation', val_outs)\n # self.val_metrics = []\n \n def test_step(self, test_batch, batch_idx):\n code = test_batch['code']\n\n out, [loss], losses = self.forward(test_batch)\n self.log('test_loss', loss)\n\n try:\n tm, rmsd, gdt, lddt = get_metrics(test_batch, out, self.sup, self.global_config.device)\n except:\n tm, rmsd, gdt, lddt = [None]*4\n print('%s failed to record metrics in test step'%code)\n\n return (code, tm, rmsd, gdt, lddt, loss.item(), losses['masked_msa'], \n losses['distogram'], losses['structure_module'])\n\n def test_epoch_end(self, test_out):\n comp = gzip.compress(bytes(json.dumps(test_out), encoding='utf-8'))\n f = gzip.open(self.trainer.log_dir + '/holdout_records.gz', 'wb')\n f.write(comp)\n f.close()\n\n def set_optim_config(self, cfg, n_epoch=None, num_data_per_epoch=None):\n opt_gr = cfg['optim_groups']\n self.optim_type = cfg['optim_type']\n groups = {k:[] for k in opt_gr}\n for n,p in self.named_parameters():\n added = False\n for keyword in opt_gr:\n if keyword in n:\n groups[keyword].append(p)\n added = True\n if not added:\n groups['default'].append(p)\n self.optim_groups = [{'params':groups[k], **v} for k,v in opt_gr.items() if len(groups[k])]\n\n self.num_data_per_epoch = num_data_per_epoch\n # self.epo = 0\n\n self.recall = None\n if 'scheduler' in cfg:\n print('scheduler found')\n if 'num_call_per_epoch' in cfg['scheduler']:\n n = cfg['scheduler']['num_call_per_epoch']\n self.recall = int( float(num_data_per_epoch) / float(n) )\n self.pbatch_idx = -1\n print('scheduler calling at '+str(self.recall))\n else:\n print('scheduler calling every epoch')\n\n sch = eval(cfg['scheduler']['class'])\n lmb_kw = eval(cfg['scheduler']['kwargs'])\n self.scheduler = (sch, lmb_kw)\n else:\n print('no scheduler found')\n self.scheduler = None\n\n def configure_optimizers(self, n_epoch=None):\n opt = eval('torch.optim.%s'%self.optim_type)\n optimizer = opt(self.optim_groups)\n\n if self.scheduler is None:\n return optimizer\n\n sch, lmb_kw = self.scheduler\n return [optimizer], [sch(optimizer, **lmb_kw)]\n\n\ndef TMLowerBound(pred, true, mask, device):\n ''' the bound of the TM score described in the alphafold paper '''\n predicted_affine = QuatAffine.from_tensor(pred, device)\n # Shape (num_res, 7)\n true_affine = QuatAffine.from_tensor(true, device)\n\n\n # print((len(predicted_affine.quaternion),\n # predicted_affine.quaternion[0].shape, \n # predicted_affine.translation[0].shape, \n # len(true_affine.quaternion),\n # true_affine.quaternion[0].shape, \n # true_affine.translation[0].shape))\n # (predicted_affine, true_affine)\n # (69, torch.Size([4]), torch.Size([69]), 2, torch.Size([69, 4]), torch.Size([2, 69]))\n \n # Shape (num_res, num_res)\n square_mask = mask[:, None] * mask[None, :]\n\n\n\n # (mask.shape, square_mask.shape)\n # (torch.Size([2, 69]), torch.Size([2, 2, 69]))\n\n # print('(mask.shape, square_mask.shape)') \n # print((mask.shape, square_mask.shape)) \n\n # num_bins = self.config.num_bins\n # # (1, num_bins - 1)\n # breaks = value['predicted_aligned_error']['breaks']\n # # (1, num_bins)\n # logits = value['predicted_aligned_error']['logits']\n\n # Compute the squared error for each alignment.\n def lfp(affine):\n ''' local frame points'''\n return affine.invert_point([x[...,None,:] for x in affine.translation], extra_dims=1)\n\n error_dist2 = 0\n for a, b in 
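# set_optim_config above buckets parameters by substrings of their names; the
# same idea standalone (the model and group keys here are illustrative):
import torch
model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.LayerNorm(4))
opt_gr = {'default': {'lr': 1e-3}, '1.weight': {'lr': 1e-4}}
groups = {k: [] for k in opt_gr}
for n, p in model.named_parameters():
    key = next((k for k in opt_gr if k != 'default' and k in n), 'default')
    groups[key].append(p)
optim_groups = [{'params': groups[k], **v} for k, v in opt_gr.items() if groups[k]]
optimizer = torch.optim.Adam(optim_groups)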
zip(lfp(predicted_affine), lfp(true_affine)):\n error_dist2 += (a - b)**2# summing over R3, pythag\n \n # error_dist2\n # torch.Size([2, 69, 69])\n \n # Shape (num_res, num_res)\n # First num_res are alignment frames, second num_res are the residues.\n\n # e = square_mask * error_dist2\n\n\n\n num_res = error_dist2.shape[0]\n # Compute d_0(num_res) as defined by TM-score, eqn. (5) in\n # http://zhanglab.ccmb.med.umich.edu/papers/2004_3.pdf\n # Yang & Skolnick \"Scoring function for automated\n # assessment of protein structure template quality\" 2004\n d0 = 1.24 * (max(num_res, 19) - 15) ** (1./3) - 1.8\n\n f = lambda d2:1/(1 + d2/(d0**2))\n\n # f(error_dist2).shape,square_mask.shape )\n # (torch.Size([2, 69, 69]), torch.Size([2, 2, 69]))\n return torch.max((f(error_dist2) * square_mask).sum(-1) / num_res)\n\ndef apx_lddt(predicted_points,\n true_points,\n true_points_mask,\n cutoff=15.,\n per_residue=False, \n device='cpu'):\n \"\"\"Measure (approximate) lDDT for a batch of coordinates.\n\n lDDT reference:\n Mariani, V., Biasini, M., Barbato, A. & Schwede, T. lDDT: A local\n superposition-free score for comparing protein structures and models using\n distance difference tests. Bioinformatics 29, 2722–2728 (2013).\n\n lDDT is a measure of the difference between the true distance matrix and the\n distance matrix of the predicted points. The difference is computed only on\n points closer than cutoff *in the true structure*.\n\n This function does not compute the exact lDDT value that the original paper\n describes because it does not include terms for physical feasibility\n (e.g. bond length violations). Therefore this is only an approximate\n lDDT score.\n\n Args:\n predicted_points: (batch, length, 3) array of predicted 3D points\n true_points: (batch, length, 3) array of true 3D points\n true_points_mask: (batch, length, 1) binary-valued float array. This mask\n should be 1 for points that exist in the true points.\n cutoff: Maximum distance for a pair of points to be included\n per_residue: If true, return score for each residue. Note that the overall\n lDDT is not exactly the mean of the per_residue lDDT's because some\n residues have more contacts than others.\n\n Returns:\n An (approximate, see above) lDDT score in the range 0-1.\n \"\"\"\n\n assert len(predicted_points.shape) == 3\n assert predicted_points.shape[-1] == 3\n assert true_points_mask.shape[-1] == 1\n assert len(true_points_mask.shape) == 3\n\n # Compute true and predicted distance matrices.\n dmat_true = (1e-10 + ((true_points[:, :, None] - true_points[:, None, :])**2).sum(-1))**0.5\n\n dmat_predicted = (1e-10 + (\n (predicted_points[:, :, None] - predicted_points[:, None, :])**2).sum(-1))**0.5\n\n dists_to_score = (\n (dmat_true < cutoff).float() * true_points_mask *\n true_points_mask.permute(0,2,1) * (1. - torch.eye(dmat_true.shape[1], device=device)) # Exclude self-interaction.\n )\n\n # Shift unscored distances to be far away.\n dist_l1 = torch.abs(dmat_true - dmat_predicted)\n\n # True lDDT uses a number of fixed bins.\n # We ignore the physical plausibility correction to lDDT, though.\n score = 0.25 * ((dist_l1 < 0.5).float() +\n (dist_l1 < 1.0).float() +\n (dist_l1 < 2.0).float() +\n (dist_l1 < 4.0).float())\n\n # Normalize over the appropriate axes.\n reduce_axes = (-1,) if per_residue else (-2, -1)\n norm = 1. 
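# The TM-score normalisation used in TMLowerBound above, evaluated (Zhang &
# Skolnick 2004, eqn. 5):
num_res = 100
d0 = 1.24 * (max(num_res, 19) - 15) ** (1. / 3) - 1.8
print(round(d0, 3))               # 3.652 for a 100-residue chain
f = lambda d2: 1. / (1. + d2 / d0 ** 2)
print(f(d0 ** 2))                 # 0.5 -- a pair separated by exactly d0 contributes 1/2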
/ (1e-10 + dists_to_score.sum(reduce_axes))\n score = norm * (1e-10 + (dists_to_score * score).sum(reduce_axes))\n\n return score\n\ndef gdt_ts(predicted_points,\n true_points,\n true_points_mask,\n cutoffs=(1,2,4,8),\n per_residue=False,\n device='cpu'):\n \"\"\"Measure GDT-TS for a batch of coordinates.\n\n https://predictioncenter.org/casp14/doc/help.html#GDT_TS\n\n Args:\n predicted_points: (batch, length, 3) array of predicted 3D points\n true_points: (batch, length, 3) array of true 3D points\n true_points_mask: (batch, length, 1) binary-valued float array. This mask\n should be 1 for points that exist in the true points.\n cutoff: Maximum distance for a pair of points to be included\n per_residue: If true, return score for each residue. Note that the overall\n lDDT is not exactly the mean of the per_residue lDDT's because some\n residues have more contacts than others.\n\n Returns:\n An (approximate, see above) lDDT score in the range 0-1.\n \"\"\"\n\n assert len(predicted_points.shape) == 3\n assert predicted_points.shape[-1] == 3\n assert true_points_mask.shape[-1] == 1\n assert len(true_points_mask.shape) == 3\n\n # Compute true and predicted distance matrices.\n dmat_true = (1e-10 + ((true_points[:, :, None] - true_points[:, None, :])**2).sum(-1))**0.5\n\n dmat_predicted = (1e-10 + (\n (predicted_points[:, :, None] - predicted_points[:, None, :])**2).sum(-1))**0.5\n\n dists_to_score = (\n true_points_mask *\n true_points_mask.permute(0,2,1) * (1. - torch.eye(dmat_true.shape[1], device=device)) # Exclude self-interaction.\n )\n\n # Shift unscored distances to be far away.\n dist_l1 = torch.abs(dmat_true - dmat_predicted)\n\n # True lDDT uses a number of fixed bins.\n # We ignore the physical plausibility correction to lDDT, though.\n score = 0\n for c in cutoffs:\n score += (dist_l1 < c).float()\n score /= len(cutoffs)\n\n # Normalize over the appropriate axes.\n reduce_axes = (-1,) if per_residue else (-2, -1)\n norm = 1. 
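# gdt_ts above in miniature: each pairwise deviation scores the fraction of
# the 1/2/4/8 Angstrom cutoffs it stays under.
import torch
dist_l1 = torch.tensor([0.5, 1.5, 3.0, 9.0])   # |d_true - d_pred| values
score = sum((dist_l1 < c).float() for c in (1, 2, 4, 8)) / 4
print(score.mean().item())                     # 0.5625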
/ (1e-10 + dists_to_score.sum(reduce_axes))\n score = norm * (1e-10 + (dists_to_score * score).sum(reduce_axes))\n\n return score\n\ndef get_metrics(batch, out, sup, device):\n # the first dim is a repeat dim\n true = batch['backbone_affine_tensor'].data[0]\n mask = batch['backbone_affine_mask'].data[0]\n # coords of the origin atom\n true_coords = batch['pseudo_beta'].data[0]\n mask = batch['pseudo_beta_mask'].data[0]\n \n # TM-score (apx)\n pred = out['structure_module']['final_affines'].data\n coords = out['structure_module']['final_frame_pos'].data\n\n # (pred.shape, true.shape, mask.shape)\n # (torch.Size([69, 7]), torch.Size([2, 69, 7]), torch.Size([2, 69]))\n\n tm = TMLowerBound(pred, true, mask, device)\n \n # print('(true_coords.shape, coords.shape)')\n # print((true_coords.shape, coords.shape))\n\n # RMSD\n sup.set(true_coords[mask==1].clone().cpu().numpy(), coords[mask==1].clone().cpu().numpy())\n sup.run()\n rmsd = sup.get_rms()\n\n # GDT\n gdt = gdt_ts(coords[None,...], true_coords[None,...], mask[None,...,None], device=device)\n\n # LDDT\n lddt = apx_lddt(coords[None,...], true_coords[None,...], mask[None,...,None], device=device)\n return tm.item(), rmsd, gdt.item(), lddt.item()","sub_path":"OriginalTrainingScripts/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":112013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"306406650","text":"import sys\nimport os\n\ndef str_binary(n,padd=12):\n binfrmt = '{fill}{align}{width}{type}'.format(fill='0', align='>', width=padd, type='b')\n n = format(n,binfrmt)\n return n\n\ndef usage(e):\n print(\"do it right...\")\n print(e)\n sys.exit()\n\ndef myargs(sysargs):\n args = {}\n\n for val in sysargs[1:]:\n k,v = val.split('=')\n args[k] = v\n return args\n\ndef read_file(fin,delimiter=\"\\n\"):\n if os.path.isfile(fin):\n with open(fin) as f:\n data = f.read()\n data = data.strip()\n return data.split(delimiter)\n usage(\"Error: file does not exist in function 'read_file'...\")\n return None\n\nif __name__=='__main__':\n\n args = myargs(sys.argv)\n\n if not 'filename' in args:\n usage(\"Error: filename not on command line...\")\n\n data = read_file(args['filename'],\" \")\n\n for d in data:\n p,h = d.split(',')\n n = int(h, 16)\n b = str_binary(n,7)\n\n print(\"{} {} \\t{} \\t{}\".format(p,h,n,b))","sub_path":"Trunk/2019_Spring/Assignments/A00-hold/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"25081762","text":"\"\"\"\nGiven two strings, check if one is permutation of the other\n\"\"\"\nfrom collections import defaultdict\n\n\ndef check_permutation(first, second):\n \"\"\"\n four iterations...\n - check len\n - use default dict,\n - counting each letter,\n - and, compare the count for each letter\n\n Time complexity: O(n) + O(n) = O(n)\n\n \"\"\"\n first_dict = defaultdict(int)\n second_dict = defaultdict(int)\n\n if len(first) != len(second):\n return False\n\n for letter in first:\n first_dict[letter] += 1\n\n for letter in second:\n second_dict[letter] += 1\n\n for k, v in first_dict.items():\n if second_dict[k] != v:\n return False\n\n return True\n\n\nassert check_permutation('abcdef', 'febcda') is True\nassert check_permutation('abcd', 'abdc') is True\nassert check_permutation('abcd', 'adfe') is False\nassert check_permutation('abcd', 'abcc') is 
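# str_binary in read.py above assembles a zero-padded binary format spec;
# equivalent inline forms for comparison:
n = 0x3A
spec = '{fill}{align}{width}{type}'.format(fill='0', align='>', width=7, type='b')
print(spec)               # 0>7b
print(format(n, spec))    # 0111010
print(format(n, '07b'))   # 0111010 -- the shorthand equivalent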
False\n","sub_path":"solutions/chapter-1-arrays-and-strings/02-check-permutation.py","file_name":"02-check-permutation.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"619473806","text":"from bs4 import BeautifulSoup\r\nimport urllib.request\r\ndef bs_scroll():\r\n params=[]\r\n for i in range(1,3):\r\n list_url = 'https://futurechosun.com/page/'+str(i)+'?s=%EB%B4%89%EC%82%AC'\r\n url = urllib.request.Request(list_url) \r\n result = urllib.request.urlopen(url).read().decode(\"utf-8\")\r\n soup = BeautifulSoup( result, \"html.parser\")\r\n result1=soup.find_all('div',class_ ='elementor-post__title')\r\n for i in result1:\r\n params.append(i.find_all('a')[0].get(\"href\"))\r\n my_result=set(params)\r\n my_result2=list(my_result)\r\n return my_result2\r\ndef bs_detail_scroll():\r\n list_url = bs_scroll()\r\n final=[]\r\n for i in list_url:\r\n url = urllib.request.Request(i) \r\n result = urllib.request.urlopen(url).read().decode(\"utf-8\")\r\n soup = BeautifulSoup( result, \"html.parser\")\r\n result1=soup.find_all('div',class_ ='elementor-element elementor-element-24e82692 elementor-widget__width-initial elementor-widget elementor-widget-theme-post-content')\r\n final.append(result1[0].get_text())\r\n return final\r\nprint(bs_detail_scroll())","sub_path":"GoogleImageCrawling.py","file_name":"GoogleImageCrawling.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"463941168","text":"#\r\n# This file is part of PyOLab. https://github.com/matsselen/pyolab\r\n# (C) 2017 Mats Selen \r\n#\r\n# SPDX-License-Identifier: BSD-3-Clause\r\n# (https://opensource.org/licenses/BSD-3-Clause)\r\n#\r\n\r\n# system stuff\r\nimport time\r\nimport numpy as np\r\n\r\n# local stuff\r\nfrom pyolabGlobals import G\r\nfrom iolabInfo import *\r\n\r\n\"\"\"\r\nThese methods are focused on dealing with the data received \r\nfrom the IOLab system. \r\n\r\n\"\"\"\r\n\r\n#=================================\r\n# returns the n'th record received\r\n#\r\ndef getAllRec(n):\r\n if n < len(G.allRecList):\r\n return G.recDict[G.allRecList[n][0]][G.allRecList[n][1]]\r\n else:\r\n return []\r\n\r\n#=========================================\r\n# returns the n'th data record received\r\n#\r\ndef getDataRec(n):\r\n if n < len(G.dataRecList):\r\n return G.recDict[G.dataRecList[n][0]][G.dataRecList[n][1]]\r\n else:\r\n return []\r\n\r\n#=========================================\r\n# returns the n'th command record received\r\n#\r\ndef getCommRec(n):\r\n if n < len(G.commRecList):\r\n return G.recDict[G.commRecList[n][0]][G.commRecList[n][1]]\r\n else:\r\n return []\r\n\r\n\r\n#===========================================================================================\r\n# This method spins through the raw data array and finds the actual data packet records received \r\n# from the remote. 
These are described in detail in the Indesign USB Interface Specification \r\n# document that can be found at at Documentation/IOLab_usb_interface_specs.pdf)\r\n# \r\n# The records are put into dictionary recDict (see pyolabGlobals.py)\r\n#\r\ndef findRecords():\r\n\r\n i = G.nextData # where we will start looking\r\n iLast = len(G.dataList) # where we will stop looking\r\n\r\n # work through the data looking for valid records and saving these to G.recDict\r\n # \r\n while i < (iLast - 3):\r\n if (G.dataList[i] == 2): # find start of packet (SOP) byte = 0x2\r\n # find record type\r\n for recType in G.recTypeList:\r\n if G.dataList[i+1] == recType:\r\n\r\n # find byte count (BC)\r\n # see if we can find the end of packet (EOP) byte = 0xa\r\n ndata = G.dataList[i+2]\r\n # check that this isn't past the end of the list\r\n if i+3+ndata < iLast:\r\n if G.dataList[i+3+ndata] == 0xa:\r\n # if SOP, BC, and EOP are all consistent then save the record\r\n rec = G.dataList[i:i+4+ndata]\r\n index = len(G.recDict[recType])\r\n\r\n # all records: [recType,index] points into recDict[recType][index]\r\n G.allRecList.append([recType,index])\r\n if recType == G.recType_dataFromRemote:\r\n # data records: [recType,index] points into recDict[recType][index]\r\n G.dataRecList.append([recType,index])\r\n else:\r\n # command records: [recType,index] points into recDict[recType][index]\r\n G.commRecList.append([recType,index])\r\n\r\n # add record to the appropriate list in the record dictionary\r\n G.recDict[recType].append(rec)\r\n # if the thing we just received was a NACK it means a command was\r\n # not properly serviced, so we should tell someone\r\n if recType == G.recType_NACK:\r\n if G.logData:\r\n G.logFile.write(\"\\nNACK: \" + str(rec))\r\n\r\n # figure out where we are starting next\r\n G.nextData = i + 4 + ndata # where the next record starts\r\n i = G.nextData - 1 # since we are adding 1 after the break\r\n break\r\n else:\r\n # shouldn't ever get here but check just in case\r\n if G.logData:\r\n G.logFile.write(\"\\nguessed wrong recType ' + hex(recType) + ' at i = \"+str(i))\r\n else:\r\n break\r\n i += 1\r\n\r\n else:\r\n i += 1\r\n\r\n\r\n#=================================================================\r\n# This method looks for changes to the fixed configuration of the \r\n# IOLab remote (for now just assumes you are using one remote)\r\n#\r\ndef findLastConfig():\r\n\r\n # look for fixed config information\r\n if len(G.recDict[G.recType_getFixedConfig]) > 0:\r\n fc = G.recDict[G.recType_getFixedConfig][-1][4] # the latest fixed config\r\n else:\r\n fc = 0 # or 0 if none found\r\n\r\n # if new, save it and print it\r\n if fc != G.lastFixedConfig: \r\n G.lastFixedConfig = fc\r\n if G.logData:\r\n G.logFile.write(\"\\nNew fixed configuration \" + str(fc))\r\n\r\n\r\n # look for packet config information\r\n if len(G.recDict[G.recType_getPacketConfig]) > 0:\r\n pc = G.recDict[G.recType_getPacketConfig][-1][4:-1] # the latest packet config\r\n else:\r\n pc = [] # or [] if none found\r\n\r\n # if new, save it and print it\r\n if pc != G.lastPacketConfig: \r\n G.lastPacketConfig = pc\r\n\r\n sc = {}\r\n for i in range(pc[0]): # decode the packet config record\r\n s = pc[i*2+1] # sensor\r\n l = pc[i*2+2] # max data length\r\n sc[s] = l\r\n\r\n G.lastSensorBytes = sc # save it\r\n G.configIsSet = True\r\n\r\n if G.logData:\r\n G.logFile.write(\"\\nNew packet configuration \" + str(pc))\r\n G.logFile.write(\"\\nNew sensor configuration \" + 
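# The SOP/BC/EOP framing that findRecords() above scans for, demonstrated on
# a tiny hand-built byte list (the record-type value 0x41 is illustrative):
SOP, EOP = 0x02, 0x0A
rec_type, payload = 0x41, [0x11, 0x22, 0x33]
packet = [SOP, rec_type, len(payload)] + payload + [EOP]
i, ndata = 0, packet[2]
# a well-formed record satisfies data[i] == SOP and data[i+3+ndata] == EOP
assert packet[i] == SOP and packet[i + 3 + ndata] == EOP
rec = packet[i:i + 4 + ndata]   # the slice findRecords() would save
print(rec)                      # [2, 65, 3, 17, 34, 51, 10]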
str(sc))\r\n\r\n#===================================================================\r\n# Extracts the payload data from dataFromRemote records and calls \r\n# extractSensorData() to extract raw sensor data from these \r\n#\r\ndef decodeDataPayloads():\r\n\r\n # we can only do this if we know what sensors to expect\r\n if len(G.lastSensorBytes) == 0:\r\n if G.logData:\r\n G.logFile.write(\"\\n len(G.lastSensorBytes) = \" + str(len(G.lastSensorBytes)))\r\n G.logFile.write(\" this will happen if you haven't sent a getPacketConfig command\")\r\n return\r\n\r\n\r\n nRec = len(G.recDict[G.recType_dataFromRemote])\r\n if nRec > G.nextRecord:\r\n for n in range(G.nextRecord,nRec):\r\n r = G.recDict[G.recType_dataFromRemote][n]\r\n\r\n recSequence = r[5] # record sequence byte (incremented every record)\r\n nSens = r[6] # number of sensors in this data record\r\n\r\n # this should be the same as the number expected for this config\r\n if nSens != len(G.lastSensorBytes):\r\n\r\n if G.logData:\r\n G.logFile.write(\"\\nsensors found \"+str(nSens)+\" expected \"+str(len(G.lastSensorBytes)))\r\n G.logFile.write(\"this can happen if you havent sent a getPacketConfig command\")\r\n\r\n i = 7 # pointer to info and data from first sensor\r\n nSaved = 0 # the number of sensors we have saved data from\r\n while nSaved < nSens:\r\n thisSensor = r[i] & 0x7F # ID of the current sensor\r\n sensorOverflow = r[i] > thisSensor # is overflow bit set?\r\n\r\n # the first couple if records may have the overflow bit set\r\n if G.logData and sensorOverflow:\r\n G.logFile.write(\"\\noverflow on recSequence \" +str(recSequence)+\" sensor \"+str(thisSensor)+\" nSaved \"+str(nSaved)+\" nSens \"+str(nSens))\r\n\r\n # make sure thisSensor is on the list of expected sensors for this config\r\n if thisSensor in G.lastSensorBytes:\r\n nValidBytes = r[i+1]\r\n sensorBytes = r[i+2:i+2+nValidBytes]\r\n\r\n # this is where the the good stuff happens\r\n extractSensorData(thisSensor,sensorBytes)\r\n else:\r\n # if we ever get here we need to tell Mats there is a problem.\r\n if G.logData:\r\n G.logFile.write(\"\\nBailing out after finding wrong sensor: \" +str(thisSensor) + \" in \" + str(r))\r\n\r\n return\r\n\r\n nSaved += 1\r\n\r\n i += (2 + G.lastSensorBytes[thisSensor])\r\n\r\n G.nextRecord = nRec\r\n\r\n#======================================================================\r\n# Extracts the raw (uncalibrated) data from individual sensor sub-payloads. \r\n# For details see \r\n#\r\n# Inputs:\r\n# sensor the number of the sensor we are decoding as per sensorName()\r\n# data the data payload we are decoding\r\n#\r\n# Output:\r\n# The information is placed in a global dictionary G.uncalDataDict\r\n# (see pyolabGlobals.py)\r\n#\r\ndef extractSensorData(sensor,data):\r\n\r\n # The different code segments below deal with data from different sensors. \r\n # Only sensors marked with '*' in the list below are extracted so far\r\n # (I'm still working on the sensors labeled with '-')\r\n #\r\n # * 'Accelerometer',\r\n # * 'Magnetometer',\r\n # * 'Gyroscope',\r\n # * 'Barometer',\r\n # * 'Microphone',\r\n # * 'Light',\r\n # * 'Force',\r\n # * 'Wheel',\r\n # - 'ECG3',\r\n # - 'Battery',\r\n # * 'HighGain',\r\n # * 'Analog7',\r\n # * 'Analog8',\r\n # * 'Analog9',\r\n # * 'Thermometer',\r\n # - 'ECG9'\r\n #\r\n # Note also that the extracted data is uncalibrated. \r\n # Examples of applying calibration constants cane be found \r\n # in Documentation/old_csharp_code.cs. 
\r\n #\r\n # For some sensors these calibration constants are\r\n # known (Analog and HighGain for example), for some they needs to be extracted \r\n # from the system with a getCalibration() call (barometer for example), and for others\r\n # they need to be measured by the user (force probe & magnetometer for example).\r\n # \r\n # The code below does not apply any calibration - it just extracts the uncalibrated data.\r\n #\r\n\r\n\r\n #-----------------------\r\n # Accelerometer\r\n # A 16 bit signed number for each of the three axes.\r\n # Full scale depends on device settings. Default is 4g. \r\n # Calibration needed for both scale and offset. \r\n #\r\n if sensorName(sensor) == 'Accelerometer':\r\n # data comes in 6 byte blocks\r\n if(len(data)%6 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nAccelerometer data not a multiple of 6 bytes\")\r\n else:\r\n nsets = len(data)/6\r\n for i in range(nsets):\r\n d = data[i*6:i*6+6]\r\n d01 = np.int16(d[0]<<8 | d[1])\r\n d23 = np.int16(d[2]<<8 | d[3])\r\n d45 = np.int16(d[4]<<8 | d[5])\r\n # the odd ordering & signs of the data below represent \r\n # the fact that the sensor is rotated on the PCB \r\n G.uncalDataDict[sensor].append([-d23,d01,d45])\r\n\r\n #-----------------------\r\n # Magnetometer\r\n # A 16 bit signed number for each of the three axes.\r\n # Full scale depends on device settings.\r\n # Calibration needed for both scale and offset. \r\n #\r\n if sensorName(sensor) == 'Magnetometer':\r\n # data comes in 6 byte blocks\r\n if(len(data)%6 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nMagnetometer data not a multiple of 6 bytes\")\r\n else:\r\n nsets = len(data)/6\r\n for i in range(nsets):\r\n d = data[i*6:i*6+6]\r\n d01 = np.int16(d[0]<<8 | d[1])\r\n d23 = np.int16(d[2]<<8 | d[3])\r\n d45 = np.int16(d[4]<<8 | d[5])\r\n G.uncalDataDict[sensor].append([-d01,-d23,-d45])\r\n\r\n #-----------------------\r\n # Gyroscope\r\n # A 16 bit signed number for each of the three axes. Linear with omega.\r\n # Full scale depends on device settings.\r\n # Calibration needed for both scale and offset. \r\n #\r\n if sensorName(sensor) == 'Gyroscope':\r\n # data comes in 6 byte blocks\r\n if(len(data)%6 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nGyroscope data not a multiple of 6 bytes\")\r\n else:\r\n nsets = len(data)/6\r\n for i in range(nsets):\r\n d = data[i*6:i*6+6]\r\n d01 = np.int16(d[0]<<8 | d[1])\r\n d23 = np.int16(d[2]<<8 | d[3])\r\n d45 = np.int16(d[4]<<8 | d[5])\r\n # the odd ordering & signs of the data below represent \r\n # the fact that the sensor is rotated on the PCB \r\n G.uncalDataDict[sensor].append([-d23,d01,d45])\r\n\r\n #-----------------------\r\n # Microphone. \r\n # Linear with intensity (I assume). Returns 16 bit unsigned number. \r\n #\r\n if sensorName(sensor) == 'Microphone':\r\n # data comes in 2 byte blocks\r\n if(len(data)%2 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nMicrophone data not a multiple of 2 bytes\")\r\n else:\r\n nsets = len(data)/2\r\n for i in range(nsets):\r\n d = data[i*2:i*2+2]\r\n d01 = np.uint16(d[0]<<8 | d[1])\r\n G.uncalDataDict[sensor].append(d01)\r\n\r\n #-----------------------\r\n # Light\r\n # Linear with intensity (I assume). Returns 16 bit unsigned number. 
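# The byte-pair decoding used throughout extractSensorData above,
# cross-checked against struct (big-endian signed 16-bit):
import struct
d = [0xFF, 0x38]                             # two payload bytes
raw = (d[0] << 8) | d[1]                     # 0xFF38 == 65336
signed = raw - 0x10000 if raw & 0x8000 else raw
assert signed == struct.unpack('>h', bytes(d))[0] == -200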
\r\n #\r\n if sensorName(sensor) == 'Light':\r\n # data comes in 2 byte blocks\r\n if(len(data)%2 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nLight data not a multiple of 2 bytes\")\r\n else:\r\n nsets = len(data)/2\r\n for i in range(nsets):\r\n d = data[i*2:i*2+2]\r\n d01 = np.uint16(d[0]<<8 | d[1])\r\n G.uncalDataDict[sensor].append(d01)\r\n\r\n #-----------------------\r\n # Force\r\n # A 16 bit signed number derived by measuring the B-field of a magnet \r\n # that moves in response to an applied force. Linear with force. \r\n # Calibration needed for both scale and offset. \r\n #\r\n if sensorName(sensor) == 'Force':\r\n # data comes in 2 byte blocks\r\n if(len(data)%2 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nForce data not a multiple of 2 bytes\")\r\n else:\r\n nsets = len(data)/2\r\n for i in range(nsets):\r\n d = data[i*2:i*2+2]\r\n d01 = np.int16(d[0]<<8 | d[1])\r\n G.uncalDataDict[sensor].append(d01)\r\n\r\n #-----------------------\r\n # Wheel\r\n # A 16 bit signed number. Each measurement is the change of the wheels position \r\n # in 1mm increments since the last measurement. Measurement interval is 1/100 sec. \r\n #\r\n if sensorName(sensor) == 'Wheel':\r\n # data comes in 2 byte blocks\r\n if(len(data)%2 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nWheel data not a multiple of 2 bytes\")\r\n else:\r\n nsets = len(data)/2\r\n for i in range(nsets):\r\n d = data[i*2:i*2+2]\r\n d01 = np.int16(d[0]<<8 | d[1]) # dr\r\n G.uncalDataDict[sensor].append(d01)\r\n\r\n #-----------------------\r\n # HighGain\r\n # G+/G- feeds DC coupled differential op-amp w/ gain 1400\r\n # Op-amp output feeds internal 12 bit ADC (raw ADC values [0 - 4095])\r\n # Full scale count is +- 3V/2 = +- 1500 mV\r\n # Zero offset 0x7FF (half full scale)\r\n # Full scale deflection = 1500mV/1400 = 1.07 mV\r\n # counts per volt = 2048 * 1400 / 1500\r\n # \r\n if sensorName(sensor) == 'HighGain':\r\n # data comes in 2 byte blocks\r\n if(len(data)%2 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nHighGain data not a multiple of 2 bytes\")\r\n else:\r\n nsets = len(data)/2\r\n for i in range(nsets):\r\n d = data[i*2:i*2+2]\r\n d01 = np.uint16(d[0]<<8 | d[1])\r\n G.uncalDataDict[sensor].append(d01)\r\n\r\n #-----------------------\r\n # Analog7\r\n # Feeds internal 12 bit ADC (raw ADC values [0 - 4095])\r\n # Full scale corresponds to either 3.0V or 3.3V depending on configuration\r\n # (see configName(); if configuration name contains '3V3' reference is 3.3V)\r\n #\r\n if sensorName(sensor) == 'Analog7':\r\n # data comes in 2 byte blocks\r\n if(len(data)%2 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nAnalog7 data not a multiple of 2 bytes\")\r\n else:\r\n nsets = len(data)/2\r\n for i in range(nsets):\r\n d = data[i*2:i*2+2]\r\n d01 = np.uint16(d[0]<<8 | d[1])\r\n G.uncalDataDict[sensor].append(d01)\r\n\r\n #-----------------------\r\n # Analog8\r\n # Feeds internal 12 bit ADC (raw ADC values [0 - 4095])\r\n # Full scale corresponds to either 3.0V or 3.3V depending on configuration\r\n # (see configName(); if configuration name contains '3V3' reference is 3.3V)\r\n #\r\n if sensorName(sensor) == 'Analog8':\r\n # data comes in 2 byte blocks\r\n if(len(data)%2 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nAnalog8 data not a multiple of 2 bytes\")\r\n else:\r\n nsets = len(data)/2\r\n for i in range(nsets):\r\n d = data[i*2:i*2+2]\r\n d01 = np.uint16(d[0]<<8 | d[1])\r\n G.uncalDataDict[sensor].append(d01)\r\n\r\n #-----------------------\r\n # Analog9\r\n # Feeds internal 12 bit ADC (raw ADC 
values [0 - 4095])\r\n # Full scale corresponds to either 3.0V or 3.3V depending on configuration\r\n # (see configName(); if configuration name contains '3V3' reference is 3.3V)\r\n #\r\n if sensorName(sensor) == 'Analog9':\r\n # data comes in 2 byte blocks\r\n if(len(data)%2 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nAnalog9 data not a multiple of 2 bytes\")\r\n else:\r\n nsets = len(data)/2\r\n for i in range(nsets):\r\n d = data[i*2:i*2+2]\r\n d01 = np.uint16(d[0]<<8 | d[1])\r\n G.uncalDataDict[sensor].append(d01)\r\n\r\n\r\n #-----------------------\r\n # Barometer\r\n # The uncalibrated data read from the Barometer chip represents both\r\n # pressure and temperature, but calibration is needed in order to turn\r\n # these into useful numbers. The calibration constants are programmed into\r\n # the barometer chip itself and can be extracted by sending the system a \r\n # \"getCalibration()\" request with the appropriate parameters. \r\n # (see IOLab_usb_interface_specs.pdf and IOLab_data_specs.pdf in Documentation/)\r\n #\r\n # from Mats old code (where i1 == d01 and i2 == d23 below):\r\n # // keep only the lowest 10 bits in each\r\n # i1 = (i1 >> 6) & (uint)0x3FF;\r\n # i2 = (i2 >> 6) & (uint)0x3FF;\r\n #\r\n # // Apply calibration\r\n # s.cal.P = Pressure(i1, i2);\r\n # \r\n # and see CalculateCalibrationConstants() and Pressure() in Documentation/old_csharp_code.cs\r\n #\r\n if sensorName(sensor) == 'Barometer':\r\n # data comes in 4 byte blocks\r\n if(len(data)%4 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nBarometer data not a multiple of 4 bytes\")\r\n else:\r\n nsets = len(data)/4\r\n for i in range(nsets):\r\n d = data[i*4:i*4+4]\r\n d01 = np.uint16(d[0]<<8 | d[1])\r\n d23 = np.uint16(d[2]<<8 | d[3])\r\n\r\n G.uncalDataDict[sensor].append([d01,d23])\r\n\r\n\r\n #-----------------------\r\n # Thermometer\r\n # The uncalibrated Thermometer data is oversampled \r\n # (it needs to be divided by 400, which is done in this code), \r\n # and then the result needs to be turned into a temperature by a linear function \r\n # for which we know the slope and intercept:\r\n # cal = 30 + (raw - calAt30degrees)*(85-30)/(calAt85degrees-calAt30degrees)\r\n # where calAt30degrees = 2041 and calAt85degrees = 2426\r\n # caution - these values are from some older code of Mats and should be \r\n # suspect until verified. 
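# The thermometer calibration described in the comment above, written out as
# a function (using the constants quoted there, which are flagged as
# unverified):
def thermometer_celsius(raw_oversampled, cal30=2041, cal85=2426):
    raw = raw_oversampled / 400.0            # undo the 400x oversampling
    return 30 + (raw - cal30) * (85 - 30) / (cal85 - cal30)

print(thermometer_celsius(2041 * 400))       # 30.0 by construction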
\r\n #\r\n if sensorName(sensor) == 'Thermometer':\r\n # data comes in 4 byte blocks\r\n if(len(data)%4 > 0):\r\n if G.logData:\r\n G.logFile.write(\"\\nThermometer data not a multiple of 4 bytes\")\r\n else:\r\n nsets = len(data)/4\r\n for i in range(nsets):\r\n d = data[i*4:i*4+4]\r\n d0123 = np.uint( d[0]<<24 | d[1]<<16 | d[2]<<8 | d[3] )\r\n\r\n G.uncalDataDict[sensor].append(d0123)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"PyOLabCode/dataMethods.py","file_name":"dataMethods.py","file_ext":"py","file_size_in_byte":20872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"256069899","text":"\n\nfrom xai.brain.wordbase.nouns._apologist import _APOLOGIST\n\n#calss header\nclass _APOLOGISTS(_APOLOGIST, ):\n\tdef __init__(self,): \n\t\t_APOLOGIST.__init__(self)\n\t\tself.name = \"APOLOGISTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"apologist\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_apologists.py","file_name":"_apologists.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"367063801","text":"import pickle\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.utils import shuffle\n\nwith open(\"/home/canok/Desktop/TransferLearning/Autonomus/Section11/traffic-signs-data/train.p\",mode=\"rb\") as training_data:\n train=pickle.load(training_data)\n\n\nwith open(\"/home/canok/Desktop/TransferLearning/Autonomus/Section11/traffic-signs-data/test.p\",mode=\"rb\") as test_data:\n test=pickle.load(test_data)\n\n\n\nwith open(\"/home/canok/Desktop/TransferLearning/Autonomus/Section11/traffic-signs-data/valid.p\",mode=\"rb\") as valid_data:\n valid=pickle.load(valid_data)\n\n\n\nx_train,y_train=train[\"features\"],train['labels']\n\nx_valid,y_valid=valid[\"features\"],valid[\"labels\"]\n\nx_test,y_test=test[\"features\"],test['labels']\n\n\nprint(x_train.shape)\n\nplt.imshow(x_train[1000])\nplt.show()\n\nx_train,y_train=shuffle(x_train,y_train)\n\n\nx_train_gray=np.sum(x_train/3,axis=3, keepdims=True)\n\nx_test_gray=np.sum(x_test/3, axis=3, keepdims=True)\n\nx_valid_gray=np.sum(x_valid/3,axis=3, keepdims=True)\n\n\nx_train_gray_norm=(x_train_gray-128)/128\n\nx_test_gray_norm=(x_test_gray-128)/128\n\nx_valid_gray_norm=(x_valid_gray-128)/128\n\nprint(\"Readu to train\",x_train_gray.shape)\n\n\nplt.imshow(x_test_gray[1000].squeeze(),cmap='gray')\nplt.figure()\nplt.imshow(x_test[1000])\nplt.show()","sub_path":"Traffic sign classification/preparation.py","file_name":"preparation.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"445479136","text":"\n# Import pandas\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\n\n\n\n# Read in white wine data\nwhite = pd.read_csv(\"data/winequality-white.csv\", sep=';')\n\n# Read in red wine data\nred = pd.read_csv(\"data/winequality-red.csv\", sep=';')\n\n\nnp.random.seed(570)\n\nredlabels = np.unique(red['quality'])\nwhitelabels = np.unique(white['quality'])\n\n\n\n# Add `type` column to `red` with value 1\nred['type'] = 1\n\n# Add `type` column to `white` with value 0\nwhite['type'] = 0\n\n# Append `white` to `red`\nwines = red.append(white, ignore_index=True)\n\nnp.random.seed(570)\n\nredlabels = np.unique(red['quality'])\nwhitelabels = np.unique(white['quality'])\n\nimport matplotlib.pyplot as plt\nfig, 
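# The grayscale + normalisation steps from preparation.py above, checked on a
# dummy batch: channel-average, then shift/scale into [-1, 1):
import numpy as np
x = np.random.randint(0, 256, size=(10, 32, 32, 3)).astype(float)
x_gray = np.sum(x / 3, axis=3, keepdims=True)    # (10, 32, 32, 1)
x_norm = (x_gray - 128) / 128
assert x_gray.shape == (10, 32, 32, 1)
assert x_norm.min() >= -1.0 and x_norm.max() < 1.0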
ax = plt.subplots(1, 2, figsize=(8, 4))\nredcolors = np.random.rand(6, 4)\nwhitecolors = np.append(redcolors, np.random.rand(1, 4), axis=0)\n\nfor i in range(len(redcolors)):\n redy = red['alcohol'][red.quality == redlabels[i]]\n redx = red['volatile acidity'][red.quality == redlabels[i]]\n ax[0].scatter(redx, redy, c=redcolors[i])\nfor i in range(len(whitecolors)):\n whitey = white['alcohol'][white.quality == whitelabels[i]]\n whitex = white['volatile acidity'][white.quality == whitelabels[i]]\n ax[1].scatter(whitex, whitey, c=whitecolors[i])\n\nax[0].set_title(\"Red Wine\")\nax[1].set_title(\"White Wine\")\nax[0].set_xlim([0, 1.7])\nax[1].set_xlim([0, 1.7])\nax[0].set_ylim([5, 15.5])\nax[1].set_ylim([5, 15.5])\nax[0].set_xlabel(\"Volatile Acidity\")\nax[0].set_ylabel(\"Alcohol\")\nax[1].set_xlabel(\"Volatile Acidity\")\nax[1].set_ylabel(\"Alcohol\")\n# ax[0].legend(redlabels, loc='best', bbox_to_anchor=(1.3, 1))\nax[1].legend(whitelabels, loc='best', bbox_to_anchor=(1.3, 1))\n# fig.suptitle(\"Alcohol - Volatile Acidity\")\nfig.subplots_adjust(top=0.85, wspace=0.7)\n\nplt.show()","sub_path":"relacionAlcoholVolatilidad.py","file_name":"relacionAlcoholVolatilidad.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"127855180","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom odoo import models, fields, api, _\nimport xlwt\nimport io\nimport base64\nfrom xlwt import easyxf\nimport datetime\n\nfrom odoo.tools.safe_eval import safe_eval\nfrom odoo.exceptions import UserError\nfrom dateutil.relativedelta import relativedelta\nimport calendar\n\n\nclass ticstockreport(models.TransientModel):\n _name = \"ticl.stock.report\"\n _description = \"Received Stock Summary Report\"\n\n @api.onchange('from_date', 'to_date')\n def onchange_week(self):\n from_date = str(self.from_date)\n to_date = str(self.to_date)\n if to_date < from_date:\n return {\n 'warning': {\n 'title': \"Warning\",\n 'message': \"To Date should be later than From Date\",\n }\n }\n\n\n from_date = fields.Datetime(string='From Date')\n to_date = fields.Datetime(string='To Date')\n inventory_summary_file = fields.Binary('Inbound Inventory Report')\n file_name = fields.Char('File Name')\n inventory_report_printed = fields.Boolean('Inbound Inventory Report')\n print_type = fields.Selection([('excel','Excel'),('pdf','PDF')], string='Print Type')\n warehouse_ids = fields.Many2many('stock.location', string='Warehouse')\n #location_id = fields.Many2many('stock.location', string='Location Name')\n\n\n #@api.multi\n def action_print_inventory_inbound(self):\n if self.print_type == 'pdf':\n\n return {\n 'type': 'ir.actions.report',\n 'report_name': 'ticl_inventory_xls_report.stock_report_pdf',\n 'model': 'ticl.stock.report',\n 'report_type': \"qweb-pdf\",\n\n }\n if self.print_type == 'excel':\n workbook = xlwt.Workbook()\n date_split_1 = str(self.from_date).split(\" \")\n rd = date_split_1[0].split('-')\n date_custom = '{0} {1}.{2}'.format(self.from_date.strftime('%b'), int(rd[2]), int(rd[0]))\n ans = calendar.weekday(int(rd[0]), int(rd[1]), int(rd[2]))\n date_split_2 = str(self.to_date).split(\" \")\n rd_1 = date_split_2[0].split('-')\n date_custom1 = '{0} {1}.{2}'.format(self.to_date.strftime('%b'), int(rd_1[2]), int(rd_1[0]))\n ans_1 = calendar.weekday(int(rd_1[0]), int(rd_1[1]), int(rd_1[2]))\n days = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", 
\"Fri\", \"Sat\", \"Sun\"]\n \n column_heading_style = easyxf('font:bold True;pattern: pattern solid, fore_colour gray25;align: horiz left')\n worksheet = workbook.add_sheet('Received Stock Summary Report')\n worksheet.write(1, 3, '{0} {1}'.format(days[ans], date_custom), easyxf('font:height 200;font:bold True;align: horiz center;'))\n worksheet.write(1, 4, 'To', easyxf('font:height 200;font:bold True;align: horiz center;'))\n worksheet.write(1, 5, '{0} {1}'.format(days[ans_1], date_custom1), easyxf('font:height 200;font:bold True;align: horiz center;'))\n worksheet.write(3, 0, _('Type'), column_heading_style)\n worksheet.write(3, 1, _('Manufacturer'), column_heading_style)\n worksheet.write(3, 2, _('Model'), column_heading_style)\n worksheet.write(3, 3, _('Serial#'), column_heading_style)\n worksheet.write(3, 4, _('Count'), column_heading_style)\n worksheet.write(3, 5, _('Condition'), column_heading_style)\n worksheet.write(3, 6, _('Received Date'), column_heading_style)\n worksheet.write(3, 7, _('Receipt Id'), column_heading_style)\n worksheet.write(3, 8, _('Warehouse'), column_heading_style)\n\n\n worksheet.col(0).width = 5000\n worksheet.col(1).width = 5000\n worksheet.col(2).width = 3000\n worksheet.col(3).width = 5000\n worksheet.col(4).width = 4500\n worksheet.col(5).width = 5000\n worksheet.col(6).width = 5800\n worksheet.col(7).width = 6000\n worksheet.col(8).width = 5000\n worksheet.col(9).width = 3000\n worksheet.col(10).width = 5000\n worksheet.col(11).width = 3000\n worksheet.col(12).width = 5000\n worksheet.col(13).width = 3000\n\n row = 4\n for wizard in self:\n heading = 'Received Stock Summary Report'\n worksheet.write_merge(0, 0, 0, 8, heading, easyxf(\n 'font:height 210; align: horiz center;pattern: pattern solid, fore_color yellow; font: color black; font:bold True;' \"borders: top thin,bottom thin\"))\n inventory_objs = self.env['stock.move.line'].search([('received_date', '>=', date_split_1[0] +' 00:00:00'),('received_date', '<=', date_split_2[0] +' 23:59:59'),('ticl_warehouse_id', '=',self.warehouse_ids.ids)])\n list=[]\n single_list = ''\n for i in inventory_objs:\n list.append(i.origin)\n for inventory in inventory_objs:\n reveive_date = str(inventory.received_date).split(\" \")\n rd = reveive_date[0].split('-')\n reveived_date = '{0}-{1}-{2}'.format(inventory.received_date.strftime(\"%b\"),int(rd[2]),int(rd[0]))\n ans =calendar.weekday(int(rd[0]),int(rd[1]),int(rd[2]))\n count = list.count(inventory.origin)\n if count <=1 :\n worksheet.write(row, 0, inventory.categ_id.name or '')\n worksheet.write(row, 1, inventory.manufacturer_id.name or '')\n worksheet.write(row, 2, inventory.product_id.name or '')\n worksheet.write(row, 3, inventory.serial_number or '')\n worksheet.write(row, 4, count or '',xlwt.easyxf(\"align: horiz left\"))\n worksheet.write(row, 5, inventory.condition_id.name or '')\n worksheet.write(row, 6, '{0} {1}'.format(days[ans], reveived_date) or '')\n worksheet.write(row, 7, inventory.origin or '')\n worksheet.write(row, 8, inventory.ticl_warehouse_id.name or '')\n\n row += 1\n elif count >1:\n if inventory.origin != single_list :\n summary_log = self.env['ticl.receipt'].search([('name','=',inventory.origin)])\n for inv in summary_log.ticl_receipt_lines:\n worksheet.write(row, 0, inv.tel_type.name or '')\n worksheet.write(row, 1, inventory.manufacturer_id.name or '')\n worksheet.write(row, 2, inv.product_id.name or '')\n worksheet.write(row, 3, inv.serial_number or '')\n worksheet.write(row, 4, inv.count_number or '',xlwt.easyxf(\"align: horiz 
left\"))\n worksheet.write(row, 5, inv.condition_id.name or '')\n worksheet.write(row, 6, '{0} {1}'.format(days[ans],reveived_date) or '')\n worksheet.write(row, 7, inventory.origin or '')\n worksheet.write(row, 8, inventory.ticl_warehouse_id.name or '')\n\n row += 1\n single_list = inventory.origin\n fp = io.BytesIO()\n workbook.save(fp)\n excel_file = base64.encodestring(fp.getvalue())\n self.inventory_summary_file = excel_file\n self.file_name = str(wizard.from_date) + '_' + 'Received Stock Summary Report'\n fp.close()\n return {\n 'type': 'ir.actions.act_url',\n 'name': 'Received Stock Summary Report',\n 'url': '/web/content/ticl.stock.report/%s/inventory_summary_file/%s.xls?download=true' % (\n self.id,self.file_name)\n\n }\n # @api.multi\n def get_receive_date_values(self,received_date):\n reveived_date = str(received_date).split(\" \")\n rd = reveived_date[0].split('-')\n reveived_date = '{0}-{1}-{2}'.format(received_date.strftime(\"%b\"), int(rd[2]), int(rd[0]))\n ans = calendar.weekday(int(rd[0]), int(rd[1]), int(rd[2]))\n days = [\"Mon\", \"Tue\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\"]\n dates = '{0} {1}'.format(days[ans],reveived_date)\n return dates\n\n\n #@api.multi\n def get_from_date_values(self, from_date):\n x = from_date\n from_date = str(from_date).split(\" \")\n rd = from_date[0].split('-')\n from_date = '{0} {1}.{2}'.format(x.strftime('%b'), int(rd[2]), int(rd[0]))\n ans = calendar.weekday(int(rd[0]), int(rd[1]), int(rd[2]))\n days = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\n dates = '{0} {1}'.format(days[ans], from_date)\n return dates\n\n\n #@api.multi\n def get_to_date_values(self, to_date):\n x = to_date\n to_date = str(to_date).split(\" \")\n rd = to_date[0].split('-')\n to_date = '{0} {1}.{2}'.format(x.strftime('%b'), int(rd[2]), int(rd[0]))\n ans = calendar.weekday(int(rd[0]), int(rd[1]), int(rd[2]))\n days = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\n dates = '{0} {1}'.format(days[ans], to_date)\n return dates\n\n\n\n #@api.multi\n def get_report_values(self,data=None):\n date_split_1 = str(self.from_date).split(\" \")\n date_split_2 = str(self.to_date).split(\" \")\n docs = self.env['stock.move.line'].search([('received_date', '>=', date_split_1[0] +' 00:00:00'),\n ('received_date', '<=', date_split_2[0] +' 23:59:59'),\n ('ticl_warehouse_id', '=',self.warehouse_ids.ids)])\n lst = []\n lst_2 = []\n origin =''\n for j in docs:\n lst_2.append(j.origin)\n for i in docs:\n if origin != i.origin :\n count_origin = lst_2.count(i.origin)\n if count_origin <=1 :\n lst.append({'categ_id': i.categ_id.name,\n 'product_id': i.product_id.name,\n 'condition_id': i.condition_id.name,\n 'count': count_origin,\n 'serial_number': i.serial_number,\n 'manufacturer_id': i.manufacturer_id.name,\n 'received_date': i.received_date,\n 'origin': i.origin,\n 'ticl_warehouse_id': i.ticl_warehouse_id.name })\n elif count_origin >1 :\n summary_log = self.env['ticl.receipt'].search([('name', '=', i.origin)]).ticl_receipt_lines\n for inv in summary_log:\n lst.append({'categ_id': inv.tel_type.name,\n 'product_id': inv.product_id.name,\n 'condition_id': inv.condition_id.name,\n 'count': inv.count_number,\n 'serial_number': inv.serial_number,\n 'manufacturer_id': inv.manufacturer_id.name,\n 'received_date': i.received_date,\n 'origin': i.origin,\n 'ticl_warehouse_id': i.ticl_warehouse_id.name})\n origin = i.origin\n return 
lst\n","sub_path":"ticl_inventory_xls_report/wizard/stock_summary_report.py","file_name":"stock_summary_report.py","file_ext":"py","file_size_in_byte":11616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"437078062","text":"import torch\nimport numpy as np\nimport time\nimport torch.nn as nn\nfrom tqdm.notebook import tqdm\nfrom modules.utils import set_seed, format_time, save_checkpoint\n\n\n# Training/Validation method\ndef bert_train_val(model, dataloaders, starting_epoch, optimizer, scheduler, epochs, device):\n print(\"\\n\\n\" + \"-\" * 15)\n print(\"| TRAINING... |\")\n print(\"-\" * 15)\n set_seed()\n start_training_time = time.time()\n\n # Define running history for train and val\n train_loss_history = []\n val_loss_history = []\n train_acc_history = []\n val_acc_history = []\n\n # Training loop\n for epoch in range(starting_epoch, epochs):\n train_loss = 0\n train_acc = 0\n model.train()\n for step, batch in tqdm(enumerate(dataloaders['train_dataloader']), total=len(dataloaders['train_dataloader'])):\n # Load and feed data to model\n input_ids = batch[0].to(device)\n attention_masks = batch[1].to(device)\n labels = batch[2].to(device)\n\n model.zero_grad()\n\n outputs = model(input_ids, labels=labels, attention_mask=attention_masks)\n loss = outputs.loss\n logits = outputs.logits\n\n batch_loss = loss.item()\n train_loss += batch_loss\n \n logits = logits.detach().cpu().numpy()\n labels = labels.to('cpu').numpy()\n\n predictions = np.argmax(logits, axis=1).flatten()\n # labels = labels.flatten()\n\n correct = 0\n for i in range(0, len(predictions)):\n if predictions[i] == labels[i]:\n correct = correct + 1\n batch_accuracy = correct / len(labels)\n train_acc += batch_accuracy\n\n if step % 100 == 0:\n print(\"Epoch: \", epoch + 1, \"/\", epochs, \"Batch: \", step + 1, \"/\", len(dataloaders['train_dataloader']),\n \"Loss: \", train_loss / (step + 1), \"Accuracy: \", batch_accuracy)\n\n loss.backward()\n # Apply gradient clipping\n nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n # Optimzer/Learning rate schedular step\n optimizer.step()\n scheduler.step()\n\n torch.cuda.empty_cache()\n\n # Loss and accuracy results by epoch\n end_epoch_time = time.time()\n epoch_train_accuracy = train_acc / len(dataloaders['train_dataloader'])\n epoch_train_loss = train_loss / len(dataloaders['train_dataloader'])\n epoch_train_time = format_time(start_training_time, end_epoch_time)\n train_loss_history.append(epoch_train_loss)\n train_acc_history.append(epoch_train_accuracy)\n\n print(\n f' epoch: {epoch + 1}, train loss: {epoch_train_loss:.6f}, train accuracy: {epoch_train_accuracy:.6f}, train time:{epoch_train_time}')\n\n # Switch to evaluation mode and run validation\n print(\"Validating...\")\n\n start_val_time = time.time()\n model.eval()\n val_loss = 0\n val_acc = 0\n with torch.no_grad():\n for step, batch in tqdm(enumerate(dataloaders['val_dataloader']), total=len(dataloaders['val_dataloader'])):\n # Load and feed data to model\n input_ids = batch[0].to(device)\n attention_masks = batch[1].to(device)\n labels = batch[2].to(device)\n\n model.zero_grad()\n\n outputs = model(input_ids, labels=labels, attention_mask=attention_masks)\n loss = outputs.loss\n logits = outputs.logits\n\n batch_loss = loss.item()\n val_loss += batch_loss\n\n logits = logits.detach().cpu().numpy()\n labels = labels.to('cpu').numpy()\n\n predictions = np.argmax(logits, axis=1).flatten()\n\n correct = 0\n for i in range(0, len(predictions)):\n if 
predictions[i] == labels[i]:\n correct = correct + 1\n\n batch_accuracy = correct / len(labels)\n val_acc += batch_accuracy\n\n torch.cuda.empty_cache()\n end_val_time = time.time()\n\n epoch_val_time = format_time(start_val_time, end_val_time)\n epoch_val_loss = val_loss / len(dataloaders['val_dataloader'])\n epoch_val_acc = val_acc / len(dataloaders['val_dataloader'])\n val_loss_history.append(epoch_val_loss)\n val_acc_history.append(epoch_val_acc)\n\n print(\n f' epoch: {epoch + 1}, val loss: {epoch_val_loss:.6f}, val accuracy: {epoch_val_acc:.6f}, val_time: {epoch_val_time}')\n\n # Record results to dictionary to return\n performance_history = {'train_loss': train_loss_history, 'val_loss': val_loss_history,\n 'train_accuracy': train_acc_history, 'val_accuracy': val_acc_history, 'num_epochs': epochs}\n\n # Save model checkpoint at end of train_val run, also saves performance history\n if epoch == epochs - 1:\n checkpoint = {\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict(),\n 'performance_history': performance_history,\n 'epoch': epoch + 1,\n }\n save_checkpoint(checkpoint, f\"./BERTcheckpoint_{checkpoint['epoch']}.pth.tar\")\n print(\"\")\n print(\"Training Finished\")\n\n return performance_history\n","sub_path":"modules/bert_train_valid.py","file_name":"bert_train_valid.py","file_ext":"py","file_size_in_byte":5500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"415692708","text":"def split_stack(stack):\r\n last_elem = stack[-1]\r\n split_index = -1\r\n for i in xrange(len(stack) - 2, -1, -1):\r\n if stack[i] == last_elem:\r\n split_index -= 1\r\n else:\r\n break\r\n return stack[:split_index], stack[split_index:]\r\n\r\n\r\ndef get_answer(stack, change_to_happy=True):\r\n if change_to_happy:\r\n if 0 not in stack:\r\n return 0\r\n\r\n if 1 not in stack:\r\n return 1\r\n else:\r\n if 0 not in stack:\r\n return 1\r\n\r\n if 1 not in stack:\r\n return 0\r\n\r\n first, second = split_stack(stack)\r\n # first.reverse()\r\n if change_to_happy:\r\n if second[0] == 1:\r\n return get_answer(first)\r\n else:\r\n return 1 + get_answer(first, False)\r\n else:\r\n if second[0] == 1:\r\n return 1 + get_answer(first)\r\n else:\r\n return get_answer(first, False)\r\n\r\n\r\nout = open('out.txt', 'w')\r\n\r\nwith open(\"B-large.in\") as f:\r\n data = f.read().split('\\n')\r\n t = int(data[0])\r\n for i in xrange(1, t + 1):\r\n tmp_data = [1 if c == '+' else 0 for c in data[i]]\r\n out.write(\"Case #{}: {}\\n\".format(i, get_answer(tmp_data)))\r\n\r\nout.close()\r\n","sub_path":"codes/CodeJamCrawler/16_0_2/ezio3593/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"553024791","text":"import base64\nimport datetime\nfrom flask import Flask, render_template, request, redirect, session\nimport pymongo\nfrom bson.objectid import ObjectId\n\napp = Flask(__name__)\napp.secret_key = 'bizim cok zor gizli sozcugumuz'\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmydb = myclient[\"sozluk7DB\"]\nkullanicilar_tablosu = mydb[\"kullanicilar\"]\nbasliklar_tablosu = mydb[\"basliklar\"]\nyazilar_tablosu = mydb[\"yazilar\"]\n\n\n\n\n@app.route('/')\ndef baslangic():\n basliklar = basliklar_tablosu.find({}).sort([(\"_id\",pymongo.DESCENDING)])\n yazilar = yazilar_tablosu.find({}).sort([(\"_id\",pymongo.DESCENDING)]).limit(5)\n kayit = None\n if 'kullanici' in 
session:\n kayit = session[\"kullanici\"]\n\n return render_template(\"anasayfa.html\", kullanici=kayit, basliklar=basliklar, yazilar=yazilar)\n\n\n\n@app.route('/b//')\ndef baslik_goster(baslik_id, sayfa_no):\n kullanici = None\n baslik = None\n basliklar = basliklar_tablosu.find({}).sort([(\"_id\",pymongo.DESCENDING)])\n yazilar=[]\n if 'kullanici' in session:\n kullanici = session[\"kullanici\"]\n if baslik_id:\n sayfalama_offset = (int(sayfa_no) - 1) * 50\n baslik = basliklar_tablosu.find_one({\"_id\": ObjectId(baslik_id)})\n print(ObjectId(baslik_id))\n yazilar = list(yazilar_tablosu.find({\"baslik._id\": ObjectId(baslik_id)}).sort([(\"_id\",pymongo.DESCENDING)]).skip(sayfalama_offset).limit(51))\n sonraki_sayfa_var = True\n sonraki_sayfa_no = int(sayfa_no) + 1\n onceki_sayfa_no = int(sayfa_no) - 1\n if len(yazilar) < 51:\n sonraki_sayfa_var = False\n else:\n yazilar.pop()\n\n\n return render_template(\"baslikgoster.html\", kullanici=kullanici, baslik=baslik, basliklar=basliklar, yazilar=yazilar, sayfa_no=sayfa_no, sonraki_sayfa_var=sonraki_sayfa_var, sonraki_sayfa_no=sonraki_sayfa_no, onceki_sayfa_no=onceki_sayfa_no)\n\n\n\n@app.route('/giris', methods=['GET', 'POST'])\ndef giris():\n if request.method == 'POST':\n kullanici = request.form['kullanici']\n sifre = request.form['sifre']\n kayit = kullanicilar_tablosu.find_one({\"_id\": kullanici})\n if kayit:\n if sifre == kayit[\"sifre\"]:\n del kayit['sifre']\n session[\"kullanici\"] = kayit\n return redirect(\"/\", code=302)\n else:\n return \"Şifre yanlış\"\n else:\n return \"Kullanıcı bulunamadı\"\n else:\n return render_template(\"giris.html\")\n\n\n@app.route('/yenibaslik', methods=['GET', 'POST'])\ndef yeni_baslik():\n if request.method == 'POST':\n baslik = request.form['baslik']\n kayit = {\"metin\": baslik}\n basliklar_tablosu.insert_one(kayit)\n return redirect(\"/\", code=302)\n\n else:\n return render_template(\"yenibaslik.html\")\n\n\n@app.route('/yeniyazi/', methods=['GET', 'POST'])\ndef yeni_yazi(baslik_id):\n baslik = basliklar_tablosu.find_one({\"_id\": ObjectId(baslik_id)})\n if request.method == 'POST':\n yazi = request.form['yazi']\n kullanici = session[\"kullanici\"]\n kayit={\"baslik\": baslik, \"icerik\": yazi, \"paylasim_tarihi\": datetime.datetime.now(), \"kullanici\": kullanici}\n yazilar_tablosu.insert_one(kayit)\n return redirect(\"/b/\" + baslik_id, code=302)\n\n else:\n return render_template(\"yeniyazi.html\", baslik = baslik)\n\n\n\n@app.route('/testyazisiolustur/', methods=['GET'])\ndef test_yazi_olustur(baslik_id):\n for i in range(0,40000):\n baslik = basliklar_tablosu.find_one({\"_id\": ObjectId(baslik_id)})\n kullanici = session[\"kullanici\"]\n yazi = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum\"\n yazi = \"--\" + str(i) + \"--\" + yazi\n kayit={\"baslik\": baslik, \"icerik\": yazi, \"paylasim_tarihi\": datetime.datetime.now(), \"kullanici\": kullanici}\n yazilar_tablosu.insert_one(kayit)\n return \"yazılar oluşturuldu\"\n\n\n\n\n@app.route('/cikis')\ndef cikis():\n session.clear()\n return redirect(\"/\", code=302)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"620858405","text":"\"\"\" Keyword-based message replies \"\"\"\nimport time\nfrom app.models import Query\nfrom . import DissCall\n\ndiss_call_keywords = ['骚扰电话', '骚扰号码']\n\n\ndef keywords_parser(msg):\n \"\"\"\n Handle keyword-hit rules\n\n :param msg: parsed message dict (from the user)\n :return msg: message dict after keyword handling (the reply)\n \"\"\"\n # Unpack fields for convenience\n phone = msg['Content']\n query_id = msg['FromUserName']\n\n # Look up the pending query\n query = Query.filter_by_id(query_id)\n\n if query:\n # A query exists, so handle it first\n if query.action == 'diss_call':\n # Start the diss_call flow; validate the input number with DissCall first\n phone_num = DissCall.check_phone(phone)\n\n if phone_num:\n # Phone number is valid; submit the call\n if DissCall.start_call(phone_num):\n # Submission succeeded\n\n expire = query.expire - int(time.time())\n\n msg['Content'] = '成功DISS{phone}一次,已将其加入DISS骚扰队列。你可在{expire}秒内继续添加骚扰号码。'.format(\n phone=phone, expire=expire)\n return msg\n else:\n # Submission failed\n msg['Content'] = 'DISS{phone}失败,请稍后重试!'.format(phone=phone)\n return msg\n\n else:\n # Phone number failed validation\n msg['Content'] = '请输入合法的电话号码:'\n return msg\n elif msg['Content'] in diss_call_keywords:\n # No query yet, but a diss_call keyword was hit\n # Create a diss_call query that expires after 360s\n action = Query(query_id, 'diss_call', 360)\n\n # Build the reply msg\n msg['Content'] = '请添加骚扰过您的电话:'\n return msg\n else:\n # No keyword matched\n if msg['MsgType'] == 'text':\n # Only when the message type is text\n msg['Content'] = '不支持指令:{content}。若需腹黑骚扰,请先回复“骚扰号码”或“骚扰电话”触发骚扰指令,然后输入骚扰过你的号码。(千万别拿自己或好友的号码来测试,不对其后果负责)'.format(\n content=msg['Content'])\n return msg\n else:\n # For event/image/etc. message types, return the content produced by MsgParser\n return msg","sub_path":"app/wexin/KwParser.py","file_name":"KwParser.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"307585050","text":"#!/usr/bin/python\n\nimport wx\n\n\nclass Frame(wx.Frame):\n def __init__(self, parent, id, title):\n style = wx.DEFAULT_FRAME_STYLE ^ (wx.RESIZE_BORDER)\n wx.Frame.__init__(self, parent, id, title=title, size=(300, 200), style=style)\n\n #center the frame\n self.Center()\n\n #create panel\n self.panel = wx.Panel(self)\n self.panel.SetBackgroundColour(\"Black\")\n\n #create menu bar\n self.CreateMenuBar()\n\n #create status bar\n self.statusbar = self.CreateStatusBar()\n self.statusbar.SetFieldsCount(2)\n self.statusbar.SetStatusText(\"No image specified\", 1)\n\n self.bitmap = None\n\n def CreateMenuBar(self):\n menubar = wx.MenuBar()\n\n menufile = wx.Menu()\n menuopen = menufile.Append(wx.ID_OPEN, \"&Open Image\", \"Open a picture\")\n self.Bind(wx.EVT_MENU, self.OnOpen, menuopen)\n\n self.menumirror = menufile.Append(-1, \"&Mirror Image\", \"Mirror the image\")\n self.Bind(wx.EVT_MENU, self.OnMirror, self.menumirror)\n self.menumirror.Enable(False)\n\n menufile.AppendSeparator()\n menuexit = menufile.Append(wx.ID_EXIT, \"E&xit\", \"Exit the viewer\")\n self.Bind(wx.EVT_MENU, self.OnExit, menuexit)\n\n menubar.Append(menufile, \"&File\")\n\n menuhelp = wx.Menu()\n 
menuabout = menuhelp.Append(wx.ID_ABOUT, \"&About\", \"About the viewer\")\n self.Bind(wx.EVT_MENU, self.OnAbout, menuabout)\n\n menubar.Append(menuhelp, \"&Help\")\n self.SetMenuBar(menubar)\n\n def OnOpen(self, e):\n dlg = wx.FileDialog(self, \"Choose an image\", \"\", \"\", \"*.jpg;*.gif;*.png\", wx.OPEN)\n if dlg.ShowModal() == wx.ID_OK:\n filename = dlg.GetPath()\n self.SetTitle(filename)\n wx.BeginBusyCursor()\n self.image = wx.Image(filename, wx.BITMAP_TYPE_ANY, -1)\n self.statusbar.SetStatusText(\"Size = %s\" % (str(self.image.GetSize())), 1)\n self.ShowBitmap()\n self.menumirror.Enable(True)\n wx.EndBusyCursor()\n dlg.Destroy()\n\n def ShowBitmap(self):\n if self.bitmap is not None:\n self.bitmap.Destroy()\n self.bitmap = wx.StaticBitmap(self.panel, -1, wx.BitmapFromImage(self.image))\n self.SetClientSize(self.bitmap.GetSize())\n self.Center()\n\n def OnMirror(self, e):\n self.image = self.image.Mirror()\n self.ShowBitmap()\n self.Refresh()\n\n def OnExit(self, e):\n self.Destroy()\n\n def OnAbout(self, e):\n dlg = wx.MessageDialog(self, \"Test Image Viewer\", \"About\", wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n\n\nclass App(wx.App):\n def OnInit(self):\n self.frame = Frame(parent=None, id=-1, title=\"Image Viewer\")\n self.frame.Show()\n self.SetTopWindow(self.frame)\n return True\n\nif __name__ == \"__main__\":\n app = App(redirect=False)\n app.MainLoop()\n","sub_path":"frame_image.py","file_name":"frame_image.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"622161594","text":"from django.shortcuts import render, redirect\nfrom rango.forms import CategoryForm, PageForm, UserForm, UserProfileForm\nfrom rango.models import Category, Page, UserProfile\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom datetime import datetime\nfrom registration.backends.simple.views import RegistrationView\n\n\n\"\"\"All view functions must take at least one parameter (typically \"request\")\"\"\"\n\n\ndef get_server_side_cookie(request, cookie, default_value=None):\n print(request.COOKIES)\n value = request.session.get(cookie)\n if not value:\n value = default_value\n print(\"\\n\\nValue not found!!!\\n\\nDefault: {}\".format(default_value))\n return value\n\n\ndef visitor_cookie_handler(request):\n visits = int(get_server_side_cookie(request, 'visits', '1'))\n print(visits)\n last_visit_cookie = get_server_side_cookie(request, 'last_visit', str(datetime.now()))\n last_visit_time = datetime.strptime(last_visit_cookie[:-7], '%Y-%m-%d %H:%M:%S')\n\n print(last_visit_time)\n\n if (datetime.now() - last_visit_time).seconds > 0:\n visits += 1\n request.session['last_visit'] = str(datetime.now())\n # else:\n # pass\n # visits = 1\n # response.set_cookie('last_visit', last_visit_cookie)\n else:\n request.session['last_visit'] = last_visit_cookie\n request.session['visits'] = visits\n print('Request session (visits): ' + str(request.session['visits']))\n\n\n# def visitor_cookie_handler(request, response):\n# visits = int(request.COOKIES.get('visits', '1'))\n# last_visit_cookie = request.COOKIES.get('last_visit', str(datetime.now()))\n# last_visit_time = datetime.strptime(last_visit_cookie[:-7], '%Y-%m-%d %H:%M:%S')\n#\n# if (datetime.now() - last_visit_time).seconds > 0:\n# print('visits are incremented!')\n# visits += 1\n# 
response.set_cookie('last_visit', str(datetime.now()))\n# else:\n# #visits = 1\n# response.set_cookie('last_visit', last_visit_cookie)\n# response.set_cookie('visits', visits)\n\n\ndef index(request):\n request.session.set_test_cookie()\n\n category_list = Category.objects.order_by('-likes')[:5]\n\n page_list = Page.objects.order_by('-views')[:5]\n # cookies above (visits)!\n context_dict = {'categories': category_list, 'pages': page_list, } # 'visits': request.session['visits'], }\n\n visitor_cookie_handler(request)\n\n context_dict['visits'] = request.session['visits']\n\n response = render(request, 'rango/index.html', context=context_dict)\n\n return response\n\n\ndef user_profile(request):\n return render(request, 'rango/user_profile.html')\n\n\ndef show_category(request, category_name_slug):\n context_dict = {}\n try:\n category = Category.objects.get(slug=category_name_slug)\n pages = Page.objects.filter(category=category).order_by('-views')\n context_dict['pages'] = pages\n context_dict['category'] = category\n except Category.DoesNotExist:\n context_dict['pages'] = None\n context_dict['category'] = None\n\n return render(request, 'rango/category.html', context_dict)\n\n\ndef about(request):\n if request.session.test_cookie_worked():\n print(\"Test cookie worked!\")\n request.session.delete_test_cookie()\n\n context_dict = {'name': request.user}\n return render(request, 'rango/about.html', context_dict)\n\n\ndef all_categories(request):\n category_list = Category.objects.order_by('-likes')[:]\n context_dict = {'categories': category_list}\n return render(request, 'rango/all_categories.html', context_dict)\n\n\n@login_required\ndef add_category(request):\n form = CategoryForm()\n # HTTP POST??\n if request.method == 'POST':\n form = CategoryForm(request.POST)\n if form.is_valid():\n category = form.save(commit=True)\n print('\\nCategory was successfully created! \\nName: {}\\nSlug: {}\\n'.format(category, category.slug))\n return render(request, 'rango/add_category_complete.html')\n else:\n print(\"ERROR!!!\")\n print(form.errors)\n print(\"ERROR!!!1!\")\n return render(request, 'rango/add_category.html', {'form': form})\n\n\n@login_required\ndef add_page(request, category_name_slug):\n try:\n category = Category.objects.get(slug=category_name_slug)\n except Category.DoesNotExist:\n category = None\n\n form = PageForm()\n if request.method == 'POST':\n form = PageForm(request.POST)\n if form.is_valid():\n if category:\n page = form.save(commit=False)\n page.category = category\n page.views = 0\n page.save()\n print('\\nPage was successfully added111! \\nTitle: {}\\nURL: {}'.format(page.title, page.url))\n return show_category(request, category_name_slug)\n # print('\\nPage was successfully added! 
\\nTitle: {}\\nURL: {}'.format(page.title, page.url))\n else:\n print(\"ERROR!!!\")\n print(form.errors)\n print(\"ERROR!!!1!\")\n context_dict = {'form': form, 'category': category}\n return render(request, 'rango/add_page.html', context_dict)\n\n\n# @login_required\n# def register_profile(request):\n# form = UserProfileForm()\n# if request.method == 'POST':\n# form = UserProfileForm(request.POST, request.FILES)\n# if form.is_valid():\n# user_profile = form.save(commit=False)\n# user_profile.user = request.user\n# user_profile.save()\n#\n# return HttpResponseRedirect('index')\n# else:\n# print(form.errors)\n#\n# context_dict = {'form': form}\n#\n# return render(request, 'rango/profile_registration.html', context_dict)\n\n\ndef register(request):\n registered = False\n\n if request.method == 'POST':\n user_form = UserForm(data=request.POST)\n profile_form = UserProfileForm(data=request.POST)\n\n if user_form.is_valid() and profile_form.is_valid():\n user = user_form.save()\n\n user.set_password(user.password)\n user.save()\n\n profile = profile_form.save(commit=False)\n profile.user = user\n\n if 'picture' in request.FILES:\n profile.picture = request.FILES['picture']\n\n profile.save()\n\n registered = True\n\n else:\n print(user_form.errors, profile_form.errors)\n else:\n user_form = UserForm()\n profile_form = UserProfileForm()\n\n return render(request, 'registration/registration_form.html',\n {'user_form': user_form, 'profile_form': profile_form, 'registered': registered})\n\n#\n# def user_login(request):\n# if request.method == 'POST':\n# username = request.POST.get('username')\n# password = request.POST.get('password')\n#\n# user = authenticate(username=username, password=password)\n# if user:\n# if user.is_active:\n# login(request, user)\n# return HttpResponseRedirect(reverse('index'))\n# else:\n# return HttpResponse(\"Your rango account is disabled.\")\n#\n# else:\n# print(\"Invalid login details: {}, {}\".format(username, password))\n# return HttpResponse(\"Invalid login details supplied.\")\n# else:\n# return render(request, 'rango/login.html', {})\n#\n#\n# @login_required\n# def user_logout(request):\n# logout(request)\n# return HttpResponseRedirect(reverse(index))\n\n\ndef list_profiles(request):\n userprofile_list = UserProfile.objects.all()\n return render(request, 'rango/list_profiles.html', {'userprofile_list': userprofile_list})\n\n\n@login_required\ndef restricted(request):\n return render(request, 'rango/restricted.html', {})\n\n\n# class RangoRegistrationView(RegistrationView):\n# def get_success_url(self, user):\n# return reverse('register_profile')\n\ndef track_url(request):\n page_id = None\n if request.method == 'GET':\n if 'page_id' in request.GET:\n page_id = request.GET['page_id']\n print('success!!!1')\n if page_id:\n try:\n page = Page.objects.get(id=page_id)\n page.views += 1\n page.save()\n return redirect(page.url)\n except:\n return HttpResponse(\"Page id {0} not found\".format(page_id))\n return redirect(reverse('index'))\n","sub_path":"rango/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"526162377","text":"from flask import Flask, jsonify, request\nfrom googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\nfrom flask_cors import CORS\nimport json\n\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/add_user_to_binding', methods=['POST'])\ndef api_post():\n try:\n data = 
json.loads(request.data.decode('utf-8'))\n except Exception as exc:\n print(str(exc))\n return jsonify(\"Cannot extract data\"), 503\n iam_api(data)\n return \"End\"\n\ndef iam_api(req):\n project_id = req.get(\"project_id\")\n access = req.get(\"role\")\n member = \"user:\" + req.get(\"e_mail\")\n credentials = GoogleCredentials.get_application_default()\n service = discovery.build('iam', 'v1', credentials=credentials)\n\n reqe = service.roles().list(pageSize=1000)\n response = reqe.execute()\n\n service = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)\n policy = service.projects().getIamPolicy(resource=project_id, body={}).execute()\n\n\n role_exist = get_roles(response, access, reqe, credentials)\n if role_exist is None:\n return jsonify(\"role does not exist\")\n else:\n # capture the return value; the function object itself was being compared to None here, which is always False\n binding = modify_policy_add_member(policy, access, member)\n if binding is None:\n return jsonify(\"role cannot be added\")\n else:\n set_policies = set_policy(credentials, project_id, policy)\n if set_policies == \"bad\":\n return jsonify(\"change cannot be uploaded\")\n else:\n return jsonify(\"Successful\")\n\n\ndef get_roles(response, access, reqe, credentials):\n service = discovery.build('iam', 'v1', credentials=credentials)\n for role in response.get('roles', []):\n title = role.get('title')\n if access == title:\n print(role)\n return role\n else:\n reqe = service.roles().list_next(previous_request=reqe, previous_response=response)\n\n if reqe is None: # list_next() returns None when there are no more pages; 'request' (the Flask global) is never None\n print(\"Role is not available\")\n return None\n\n\ndef modify_policy_add_member(policy, access, member):\n try:\n access = access.lower()\n role = \"roles/\" + access\n binding = next(b for b in policy['bindings'] if b['role'] == role)\n binding['members'].append(member)\n print(binding)\n return binding\n except Exception:\n print(\"User cannot be added to the bindings\")\n return None\n\n\ndef set_policy(credentials, project_id, policy):\n try:\n service = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)\n policy = service.projects().setIamPolicy(resource=project_id, body={'policy': policy}).execute()\n print(policy)\n return policy\n except Exception:\n print(\"Change cannot be set\")\n return \"bad\"\n\n\nif __name__ == '__main__':\n import os # needed for os.environ below; 'os' was not imported at the top\n #app.run( debug=True)\n app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))","sub_path":"PycharmProjects/IAM_to_datastore/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"204171177","text":"# -*- coding: utf-8 -*-\n\n\nimport math \n\n\"\"\"This file checks whether the table coordinates given in the Third Implementation by Document are feasible\"\"\"\n\ndef separar_as_coordenadas(lista_2, lista_verificar_circ, lista_verificar_rect):\n \"\"\"This function splits the coordinates into lists\"\"\"\n for w in lista_2:\n if w[0] == 'Retangulo':\n (lista_verificar_rect).append(w)\n if w[0] == 'Circulo':\n (lista_verificar_circ).append(w)\n return lista_verificar_rect, lista_verificar_circ\n\ndef verificar_circulo(lista_rect, lista_circ, lista_2, lista_verificar_circ, lista_verificar_rect, lista_coords_x_circ,lista_coords_y_circ,mesa_raio, robot_raio, lista_coords_rect_x, lista_coords_rect_y, lista_distancia):\n \"\"\"This function checks whether the circular tables' coordinates are valid\"\"\"\n for w in lista_verificar_circ:\n x_circulo = int(w[1])\n y_circulo = int(w[2])\n raio = int(w[3])\n if ver_circ(lista_coords_x_circ,lista_coords_y_circ,mesa_raio, x_circulo, 
y_circulo, robot_raio, raio):\n k = 0#At least one table is badly positioned\n return k#A single badly positioned table is enough to stop the function\n else:\n k = 1\n lista_coords_x_circ.append(x_circulo)#Append to lists so the distance can be checked next\n lista_coords_y_circ.append(y_circulo)#Also using these lists\n mesa_raio.append(raio)\n return k\n\ndef verificar_retangulo(lista_rect, lista_circ, lista_2, lista_verificar_circ, lista_verificar_rect, lista_coords_x_circ,lista_coords_y_circ,mesa_raio, robot_raio, lista_coords_rect_x, lista_coords_rect_y, lista_distancia):\n \"\"\"This function checks whether the rectangular tables' coordinates are valid\"\"\"\n for w in lista_verificar_rect:\n distancia_x = abs(int(w[1])-int(w[3]))\n distancia_y = abs(int(w[2])-int(w[4]))\n if (distancia_x > distancia_y):#See which side of the rectangle is longer\n distancia = distancia_x/2#For the calculations, the larger distance is used\n else:\n distancia = distancia_y/2\n x_retangulo = int(w[1])+ distancia_x/2#X and Y of the rectangle's center\n y_retangulo = int(w[2]) + distancia_y/2\n if ver_rect(lista_coords_rect_x,lista_coords_rect_y,lista_coords_x_circ,lista_coords_y_circ,mesa_raio,y_retangulo,x_retangulo,distancia,robot_raio, lista_distancia):\n k = 0#Whenever a table fails, k is set to 0 and the function stops\n return k\n else:\n if ver_rect_rect(lista_coords_rect_x,lista_coords_rect_y,lista_coords_x_circ,lista_coords_y_circ,mesa_raio,y_retangulo,x_retangulo,distancia,robot_raio, lista_distancia):\n k = 0\n return k\n else:\n lista_coords_rect_x.append(x_retangulo)\n lista_coords_rect_y.append(y_retangulo)\n lista_distancia.append(distancia)\n k = 1\n return k\n\ndef separar_coords(lista_rect, lista_circ, lista_2, lista_verificar_circ, lista_verificar_rect, lista_coords_x_circ,lista_coords_y_circ,mesa_raio, robot_raio, lista_coords_rect_x, lista_coords_rect_y, lista_distancia):\n \"\"\"This function calls the coordinate-splitting function and checks whether the coordinates are feasible\"\"\"\n lista_verificar_rect, lista_verificar_circ = separar_as_coordenadas(lista_2, lista_verificar_circ, lista_verificar_rect)\n k = verificar_circulo(lista_rect, lista_circ, lista_2, lista_verificar_circ, lista_verificar_rect, lista_coords_x_circ,lista_coords_y_circ,mesa_raio, robot_raio, lista_coords_rect_x, lista_coords_rect_y, lista_distancia)\n if k == 0:#First check that the tables are well positioned relative to the circular ones\n return k\n else:#if they are, then check the rectangular tables\n k = verificar_retangulo(lista_rect, lista_circ, lista_2, lista_verificar_circ, lista_verificar_rect, lista_coords_x_circ,lista_coords_y_circ,mesa_raio, robot_raio, lista_coords_rect_x, lista_coords_rect_y, lista_distancia)\n return k\n \ndef ver_circ(lista_coords_x_circ,lista_coords_y_circ,mesa_raio,x_circulo,y_circulo,robot_raio,raio):\n \"\"\"Function that checks whether the potential 'future' circular table is at the right distance\n from all other existing circular tables\"\"\"\n e = -1\n for w in lista_coords_x_circ:\n e = e +1\n if math.sqrt(((lista_coords_x_circ[e]-x_circulo)**2)+((lista_coords_y_circ[e]-y_circulo)**2))< mesa_raio[e]+5+robot_raio +raio:\n return True#returning True means the distance is not sufficient\n \n \ndef ver_rect(lista_coords_rect_x,lista_coords_rect_y,lista_coords_x_circ,lista_coords_y_circ,mesa_raio,y_retangulo,x_retangulo,distancia,robot_raio, lista_distancia):\n \"\"\"Function that checks whether the 
potential 'future' rectangular table is at the right distance\n from all other existing circular tables\"\"\"\n h=-1\n for w in lista_coords_x_circ:\n h = h +1\n if math.sqrt(((lista_coords_x_circ[h]-x_retangulo)**2)+((lista_coords_y_circ[h]-y_retangulo)**2))< distancia + mesa_raio[h] +robot_raio+5:\n return True#returning True means the distance is not sufficient\n \ndef ver_rect_rect(lista_coords_rect_x,lista_coords_rect_y,lista_coords_x_circ,lista_coords_y_circ,mesa_raio,y_retangulo,x_retangulo,distancia,robot_raio, lista_distancia):\n \"\"\"Function that checks whether the potential 'future' rectangular table is at the right distance\n from all other existing rectangular tables\"\"\"\n t = -1\n for x in lista_coords_rect_x:\n t = t+1\n if math.sqrt(((lista_coords_rect_x[t]-x_retangulo)**2)+((lista_coords_rect_y[t]-y_retangulo)**2))< distancia + lista_distancia[t]+robot_raio+5:\n return True#returning True means the distance is not sufficient\n\ndef verificar_coordenadas(lista_rect, lista_circ, lista_2):\n \"\"\"Main function\"\"\"\n k = separar_coords(lista_rect, lista_circ, lista_2, [], [],[],[],[], 3, [],[],[])\n return k\n","sub_path":"Verificar_Coordenadas_Terceira_Documento.py","file_name":"Verificar_Coordenadas_Terceira_Documento.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"373467389","text":"\"\"\"\nCS3B, Assignment #10, Online Dictionary\nUlises Marian\nTesting\n\"\"\"\n\nimport unittest\nimport json\nfrom assignment10 import *\n\nclass TimeFuncTest(unittest.TestCase):\n def testTimeFunc(self):\n #1 test\n expected = (340282366920938463463374607431768211456,\n 2.303000000009048e-06)\n actual = result, duration = time_func(pow, 2, 128)\n self.assertEqual(expected[0], actual[0]) #result\n\n # self.assertAlmostEqual(expected[1], actual[1]) #duration\n\n #2 test\n expected2 = (None, 4.4303999999995014e-05)\n actual2 = result, duration = time_func(print, \"hello\", \"world\",\n sep=\"\\n\")\n self.assertEqual(expected2[0], actual2[0]) #result\n\n # self.assertAlmostEqual(expected2[1], actual2[1]) #duration\n\n #3 test\n expected = (777, 9.570000000236334e-07)\n actual = result, duration = time_func(abs, 777)\n self.assertEqual(expected[0], actual[0])\n\nclass OxfordDictionaryTest(unittest.TestCase):\n dictionary = OxfordDictionary()\n def testSearch(self):\n #1 word, no example\n word = \"cheetah\"\n entry = self.dictionary.search(word)\n self.assertEqual(word, entry.word)\n # no example, cheetah\n example = None\n entry_example = entry.example\n self.assertEqual(example, entry.example)\n\n #2 word w/example\n word = \"red\"\n entry = self.dictionary.search(word)\n self.assertEqual(word, entry.word)\n\n part_of_speech = \"adjective\"\n part_of_speech_entry = entry.part_of_speech\n self.assertEqual(part_of_speech, part_of_speech_entry)\n\n example = \"her red lips\"\n entry_example = entry.example\n self.assertEqual(example, entry.example)\n\n #3 word w/example\n word = \"light\"\n entry = self.dictionary.search(word)\n self.assertEqual(word, entry.word)\n\n part_of_speech = \"noun\"\n part_of_speech_entry = entry.part_of_speech\n self.assertEqual(part_of_speech, part_of_speech_entry)\n\n definition = \"the natural agent that stimulates sight \" \\\n \"and makes things visible\"\n definition_entry = entry.definition\n self.assertEqual(definition, definition_entry)\n\n example = \"the light of the sun\"\n entry_example = entry.example\n 
self.assertEqual(example, entry.example)\n\n #4 word, no example\n word = \"tiger\"\n entry = self.dictionary.search(word)\n self.assertEqual(word, entry.word)\n\n part_of_speech = \"noun\"\n part_of_speech_entry = entry.part_of_speech\n self.assertEqual(part_of_speech, part_of_speech_entry)\n\n definition = \"a very large solitary cat with a yellow-brown coat\" \\\n \" striped with black, native to the forests of Asia\" \\\n \" but becoming increasingly rare.\"\n definition_entry = entry.definition\n self.assertEqual(definition, definition_entry)\n\n example = None\n entry_example = entry.example\n self.assertEqual(example, entry.example)\n\n def testSearchFailure(self):\n with self.assertRaises(KeyError):\n self.dictionary.search(\"correkt\")\n\n with self.assertRaises(KeyError):\n self.dictionary.search(\"neber wrung\")\n\n with self.assertRaises(KeyError):\n self.dictionary.search(\"dneiqond\")\n\n\nclass DiciontaryTest(unittest.TestCase):\n def testSourceAndDuration(self):\n #1 example\n dictionary = Dictionary()\n #source should be OXFORD_ONLINE\n word = \"movie\"\n expected_source = \"OXFORD_ONLINE\"\n actual_source = dictionary.search(word)\n actual_duration = actual_source[2]\n self.assertEqual(expected_source, actual_source[1].name)\n #duration\n #source should be Cache\n word = \"movie\"\n expected_source2 = \"CACHE\"\n actual_source2 = dictionary.search(word)\n actual_duration2 = actual_source2[2]\n #found in CACHE\n self.assertEqual(expected_source2, actual_source2[1].name)\n #cache much faster\n self.assertLess(actual_duration2, actual_duration)\n\n #2 example\n dictionary = Dictionary()\n # source should be OXFORD_ONLINE\n word = \"soccer\"\n expected_source = \"OXFORD_ONLINE\"\n actual_source = dictionary.search(word)\n actual_duration = actual_source[2]\n self.assertEqual(expected_source, actual_source[1].name)\n # duration\n # source should be Cache\n word = \"soccer\"\n expected_source2 = \"CACHE\"\n actual_source2 = dictionary.search(word)\n actual_duration2 = actual_source2[2]\n # found in CACHE\n self.assertEqual(expected_source2, actual_source2[1].name)\n # cache much faster\n self.assertLess(actual_duration2, actual_duration)\n\n #3 example\n dictionary = Dictionary()\n # source should be OXFORD_ONLINE\n word = \"fast\"\n expected_source = \"OXFORD_ONLINE\"\n actual_source = dictionary.search(word)\n actual_duration = actual_source[2]\n self.assertEqual(expected_source, actual_source[1].name)\n # duration\n # source should be Cache\n word = \"fast\"\n expected_source2 = \"CACHE\"\n actual_source2 = dictionary.search(word)\n actual_duration2 = actual_source2[2]\n # found in CACHE\n self.assertEqual(expected_source2, actual_source2[1].name)\n # cache much faster\n self.assertLess(actual_duration2, actual_duration)\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"10_test.py","file_name":"10_test.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"385933635","text":"import os\nfrom flask import Flask, render_template, url_for, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom forms import DeleteForm, AddForm, AddFormRoom\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret_key!'\n\n#############################\n### SQL DATABASE SECTION ####\n#############################\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////'+os.path.join(basedir, 
'db.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nMigrate(app, db)\n\n#############################\n### Models ##################\n#############################\n\nclass Person(db.Model):\n __tablename__ = 'persons'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Text)\n #one-to-one relation\n #one Person to one Room\n room = db.relationship('Room', backref='person', uselist=False)\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n if self.room:\n return f\"{self.room} belongs to {self.name.title()} (id: {self.id})\"\n else:\n return f\"Person is {self.name.title()} (id: {self.id})\"\n\nclass Room(db.Model):\n __tablename__ = 'rooms'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Text)\n person_id = db.Column(db.Integer, db.ForeignKey('persons.id'))\n\n def __init__(self, name, person_id):\n self.name = name\n self.person_id = person_id\n\n def __repr__(self):\n return f\"Room name: {self.name} belongs to {self.person_id}\"\n\n\n###############################\n####### Views #################\n###############################\n\n@app.route('/')\ndef index():\n return render_template('home.html')\n\n@app.route('/list')\ndef list_persons():\n persons = Person.query.all()\n return render_template('list.html', persons = persons)\n\n\n@app.route('/delete', methods=['GET', 'POST'])\ndef delete_person():\n form = DeleteForm()\n\n if form.validate_on_submit():\n id = form.id.data\n person = Person.query.get(id)\n db.session.delete(person)\n db.session.commit()\n return redirect(url_for('list_persons'))\n return render_template('delete.html', form=form)\n\n@app.route('/add', methods=['GET', 'POST'])\ndef add_person():\n form = AddForm()\n\n if form.validate_on_submit():\n name = form.name.data\n\n new_person = Person(name)\n db.session.add(new_person)\n db.session.commit()\n\n return redirect(url_for('list_persons'))\n return render_template('add.html', form=form)\n\n@app.route('/add_room', methods=['GET', 'POST'])\ndef add_room():\n form = AddFormRoom()\n\n if form.validate_on_submit():\n name = form.name.data\n person_id = form.person_id.data\n new_room = Room(name, person_id)\n db.session.add(new_room)\n db.session.commit()\n return redirect(url_for('list_persons'))\n return render_template('add_room.html', form=form)\n\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"project-social-3db_in views/adoption_site.py","file_name":"adoption_site.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"440628728","text":"from bs4 import BeautifulSoup\nimport urllib\nfrom xml.etree import ElementTree as ET\nimport pprint\nimport sys\nfrom tabulate import tabulate\n\nimport get_optimal as util\n\ndef getFinalStandings(leagueId, yearId):\n\n url = 'http://games.espn.com/ffl/tools/finalstandings?leagueId=%d&seasonId=%d' % (leagueId, yearId)\n\n r = urllib.urlopen(url).read()\n soup = BeautifulSoup(r,'lxml')\n tables = soup.find_all('table')\n\n table = util.table2listdict(tables[1],1)\n\n return table\n\ndef all_time_records(leagueId, first_year, last_year):\n\n years = range(first_year, last_year+1)\n\n stats = {}\n\n for y in years:\n standings = getFinalStandings(leagueId, y)\n\n for team in standings:\n\n owner = team['OWNER(S)']\n\n if owner not in stats:\n stats[owner] = {'owner': owner,\n 'wins' : 0,\n 'losses': 0,\n 'pf': 0,\n 'pa': 0,\n 'ranking': []}\n\n stats[owner]['wins'] += 
int(team['REC'].split('-')[0])\n stats[owner]['losses'] += int(team['REC'].split('-')[1])\n stats[owner]['pf'] += float(team['PF'])\n stats[owner]['pa'] += float(team['PA'])\n stats[owner]['ranking'].append(int(team['RANK']))\n\n gtotal = float(stats[owner]['wins'] + stats[owner]['losses'])\n stats[owner]['win_perc'] = stats[owner]['wins'] / gtotal\n stats[owner]['pf_pg'] = stats[owner]['pf'] / gtotal\n stats[owner]['pa_pg'] = stats[owner]['pa'] / gtotal\n stats[owner]['avg_rank'] = sum(stats[owner]['ranking']) / float(len(stats[owner]['ranking']))\n\n return stats\n\ndef print_all_time(all_time_stats):\n\n headers = ['owner','win_perc','wins','losses','pf_pg','pa_pg','avg_rank','ranking']\n data = []\n for k,v in all_time_stats.iteritems():\n data.append([v[h] for h in headers])\n data = sorted(data,key=lambda l:l[1], reverse=True)\n print(tabulate(data, headers=headers, floatfmt=\".3f\"))\n\nif __name__ == '__main__':\n\n leagueId = int(sys.argv[1])\n yearId1 = int(sys.argv[2])\n yearId2 = int(sys.argv[3])\n\n stats = all_time_records(leagueId, yearId1, yearId2)\n\n print_all_time(stats)\n","sub_path":"get_records.py","file_name":"get_records.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"549766292","text":"import torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport scipy\nfrom scipy import stats\n#import seaborn as sns\ntotalnumber=2000\nn_data=torch.ones(totalnumber,2)\nx0=torch.normal(2*n_data,1)\n\ny0=torch.zeros(totalnumber)\nx1=torch.normal(-2*n_data,1)\ny1=torch.ones(totalnumber)\nxold=torch.cat((x0,x1),0).type(torch.FloatTensor)\nyold=torch.cat((y0,y1),).type(torch.LongTensor)\nxold,yold=Variable(xold),Variable(yold)\nnp.savetxt(\"train_original.csv\",xold.data.numpy(),delimiter=\",\")\n#plt.scatter(xold.data.numpy()[:, 0], xold.data.numpy()[:, 1], c=yold.data.numpy(), s=100, lw=0, cmap='RdYlGn')\n#plt.show()\nN=4\ntem1=torch.zeros(100)\nnm=torch.randn(N,2,2)\nfor j in range(N):\n numberq=int(totalnumber/N)\n nm1=nm[j,:,:]\n for i in range(numberq):\n point=torch.zeros(1,2)\n point[0,0]=x0[i+j*numberq,0]\n point[0,1]=x0[i+j*numberq,1]\n newpoint=torch.mm(point,nm1)\n x0[i+j*numberq,0]=newpoint[0,0]\n x0[i+j*numberq,1]=newpoint[0,1]\n for i in range(numberq):\n point=torch.zeros(1,2)\n point[0,0]=x1[i+j*numberq,0]\n point[0,1]=x1[i+j*numberq,1]\n newpoint=torch.mm(point,nm1)\n x1[i+j*numberq,0]=newpoint[0,0]\n x1[i+j*numberq,1]=newpoint[0,1]\nx0=x0.numpy()\nx1=x1.numpy()\n#print(tem1/2.828)\n\nP=np.zeros(totalnumber*totalnumber)\nfor i in range(totalnumber):\n for j in range(totalnumber):\n t1=(x0[i,0]-x1[j,0])\n t2=(x0[i,1]-x1[j,1]) # second coordinate (was a copy-paste of t1)\n P[i*totalnumber+j]=math.sqrt(t1*t1+t2*t2)\n #print(P[i*totalnumber+j])\nprint(np.mean(P))\n\n\ndata = P\ntem = 20\ndatamax = np.amax(data)\ndatamin = np.amin(data)\n#print(datamax)\n#print(datamin)\nwidth = (datamax-datamin)/tem\nsumd = np.zeros(tem+1)\nxranged = np.zeros(tem+1)\nfor i in range(tem+1):\n xranged[i] = i*width+datamin\nfor i in range(data.shape[0]):\n P1 = np.where(xranged >= data[i]-0.0001)\n #print(P1[0])\n #print(data[i])\n # print(P1[0][0])\n sumd[P1[0][0]] = sumd[P1[0][0]]+1\n#print(data.shape[0])\nsumd = sumd/data.shape[0]\ncdf = np.zeros(tem+1)\nfor i in range(tem+1):\n for j in range(i):\n cdf[i] = cdf[i]+sumd[j]\nprint(cdf)\nnp.savetxt('cdf_projected.txt', cdf, 
delimiter='/')\nnp.savetxt('xrange_projected.txt', xranged, delimiter='/')\n\n#plt.show()\n\n\n\n\n\n\n\n","sub_path":"CDF_projected.py","file_name":"CDF_projected.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"184123391","text":"import sqlite3\nimport os\n\ntable_names = [\"graveyard\", \"customers\", \"targets\", \"materials\", \"waybills\", \"work_orders\", \"heaps\", \"table_properties\", \"loads\", \"loads_waybills\"]\n\n\ndef addnewdb(myid, dbid):\n try:\n os.mkdir(\"databases/\" + str(myid), 0o777)\n os.chmod(\"databases/\" + str(myid), 0o777)\n except OSError:\n pass\n # print(\"Folder already exists\")\n\n newdb = sqlite3.connect(\"databases/\" + myid + \"/\" + str(dbid), isolation_level=None)\n os.chmod(\"databases/\" + myid + \"/\" + str(dbid), 0o777)\n c = newdb.cursor()\n\n # c.execute('''CREATE TABLE android_metadata (locale TEXT)''')\n\n c.execute('''CREATE TABLE customers(_ID integer primary key autoincrement,\n c_name text not null unique,\n c_name_id text not null,\n c_contact text not null,\n c_phone text not null,\n c_date integer default 0,\n c_misc text not null\n )''')\n\n # c.execute('''CREATE TABLE sqlite_sequence(name,seq)''')\n\n c.execute('''CREATE TABLE materials(\n _ID integer primary key autoincrement,\n m_name text not null unique,\n m_date integer default 0,\n m_name_id text not null\n )''')\n\n c.execute('''CREATE TABLE targets(\n _ID integer primary key autoincrement,\n t_parent_id integer default 0,\n t_name text unique,\n t_capacity integer not null,\n t_date integer default 0,\n t_misc text not null\n )''')\n\n c.execute('''CREATE TABLE work_orders(\n _ID integer primary key autoincrement,\n wo_name text unique,\n wo_customer_id integer,\n wo_capacity integer default 0,\n wo_misc text not null,\n wo_due_time integer default 0,\n wo_date integer default 0,\n wo_finished integer default 0,\n wo_weighing integer default 0,\n FOREIGN KEY (wo_customer_id) REFERENCES customers (_ID) ON DELETE SET NULL\n )''')\n\n c.execute('''CREATE TABLE table_properties(\n _ID integer primary key autoincrement,\n p_active_material_id integer,\n p_active_heap_type integer default 0,\n p_trip_heap_id integer,\n p_project_heap_id integer,\n p_cloud_heap_id integer,\n FOREIGN KEY (p_active_material_id) REFERENCES materials (_ID) ON DELETE SET NULL,\n FOREIGN KEY (p_trip_heap_id) REFERENCES heaps (_ID) ON DELETE SET NULL,\n FOREIGN KEY (p_project_heap_id) REFERENCES heaps (_ID) ON DELETE SET NULL,\n FOREIGN KEY (p_cloud_heap_id) REFERENCES heaps (_ID) ON DELETE SET NULL\n )''')\n\n c.execute('''CREATE TABLE heaps(\n _ID integer primary key autoincrement,\n h_workorder integer,\n h_target integer,\n h_date integer default 0,\n h_finished integer default 0,\n h_weighing integer default 0,\n FOREIGN KEY (h_workorder) REFERENCES work_orders (_ID) ON DELETE SET NULL,\n FOREIGN KEY (h_target) REFERENCES targets (_ID) ON DELETE SET NULL\n )''')\n\n c.execute('''CREATE TABLE loads(\n _ID integer primary key autoincrement,\n l_weight integer,\n l_date integer,\n l_fuel integer default 0,\n l_distance integer default 0,\n l_tool integer default 0,\n l_heap integer, l_material integer,\n FOREIGN KEY (l_heap) REFERENCES heaps (_ID) ON DELETE SET NULL,\n FOREIGN KEY (l_material) REFERENCES materials (_ID) ON DELETE SET NULL\n )''')\n\n c.execute('''CREATE TABLE waybills(\n _ID integer primary key autoincrement,\n wb_date integer default 0,\n wb_supplier text not null,\n wb_customer text 
not null,\n wb_workorder text not null,\n wb_target text not null,\n wb_freetext text not null,\n wb_location text not null,\n wb_operator text not null,wb_obw integer default 0\n )''')\n\n c.execute('''CREATE TABLE loads_waybills(\n _ID integer primary key autoincrement,\n lwb_load integer,\n lwb_waybill integer,\n FOREIGN KEY (lwb_load) REFERENCES loads (_ID) ON DELETE CASCADE,\n FOREIGN KEY (lwb_waybill) REFERENCES waybills (_ID) ON DELETE CASCADE\n )''')\n\n # c.execute('''CREATE TABLE garbage(\n # _ID integer primary key autoincrement,\n # table text not null,\n # row integer not null\n # )''')\n\n c.execute('''CREATE INDEX h_workorder_index ON heaps (h_workorder)''')\n\n c.execute('''CREATE INDEX h_target_index ON heaps (h_target)''')\n\n c.execute('''CREATE INDEX h_date_index ON heaps (h_date)''')\n\n c.execute('''CREATE INDEX l_material_index ON loads (l_material)''')\n\n c.execute('''CREATE INDEX l_heap_index ON loads (l_heap)''')\n\n c.execute('''CREATE INDEX l_date_index ON loads (l_date)''')\n\n c.execute('''CREATE INDEX wo_customer_index ON work_orders (wo_customer_id)''')\n\n c.execute('''CREATE INDEX wo_finished_index ON work_orders (wo_finished)''')\n\n c.execute('''CREATE INDEX wo_date_index ON work_orders (wo_date)''')\n\n c.execute('''CREATE TRIGGER delete_compartments \n AFTER DELETE ON targets\n FOR EACH ROW BEGIN DELETE FROM targets \n WHERE t_parent_id = OLD._id; END''')\n\n c.execute('''CREATE TRIGGER create_compartment_heap BEFORE INSERT ON heaps \n WHEN NEW.h_target > 0 AND \n (SELECT targets.t_parent_id FROM targets WHERE targets._ID = NEW.h_target LIMIT 1) <= 0 \n BEGIN INSERT INTO heaps (h_workorder, h_target, h_date, h_finished, h_weighing) \n SELECT new. h_workorder, targets, ID, new.h_date, new.h_finished, new.h_weighing FROM targets \n WHERE new.h_target = targets.t_parent_id; END''')\n\n c.execute('''CREATE TABLE graveyard(_ID integer primary key autoincrement,\n c_databaseid text not null,\n c_tablename text not null,\n c_rowid integer not null\n )''')\n\n c.close()\n\n\ndef dbaddentry(myid, dbid, table, entry):\n dbcheckqueryparam(table)\n if not dbexistcheck(myid, dbid):\n addnewdb(myid, dbid)\n\n conn = sqlite3.connect(\"databases/\" + myid + \"/\" + str(dbid), isolation_level=None)\n c = conn.cursor()\n c.execute(\"PRAGMA table_info(%s)\" % table)\n columns = len(c.fetchall())\n cur = c.execute(\"SELECT * from %s\" % table)\n\n names = list(map(lambda x: x[0], cur.description))\n\n cnames = table + \"(\" + \",\".join(names[1:]) + \")\"\n try:\n c.execute('''INSERT INTO {tn} VALUES ({q})'''.format(tn=cnames, q=\",\".join([\"?\"]*(columns-1))), entry[1:])\n except sqlite3.IntegrityError as e:\n print(\"row already added: \", e, \" \", entry, \" \", dbid)\n c.close()\n\n\ndef dbquery(myid, dbid): # get all of mydb\n dbaste = {}\n if not dbexistcheck(myid, dbid):\n addnewdb(myid, dbid)\n conn = sqlite3.connect(\"databases/\" + myid + \"/\" + str(dbid))\n c = conn.cursor()\n\n for name in table_names:\n c.execute(\"SELECT * FROM %s\" % name)\n dbaste[name] = c.fetchall()\n\n conn.close()\n\n return dbaste\n\n\ndef dbdeltaquery(myid, dbid, table, nrtograb):\n if not dbexistcheck(myid, dbid):\n addnewdb(myid, dbid)\n conn = sqlite3.connect(\"databases/\" + myid + \"/\" + str(dbid))\n c = conn.cursor()\n\n c.execute(\"SELECT * FROM %s ORDER BY _ID DESC LIMIT %s\" % (table, str(nrtograb+1)))\n delta_state = c.fetchall()\n delta_state.reverse()\n\n #print(\"DELTA STATE: \", delta_state)\n #print(\"DBID: \", dbid)\n\n final_delta_state = [entry for entry in 
delta_state if not dbgraveyardcheck(myid, dbid, table, entry[0])]\n\n #print(\"DS2\", delta_state)\n\n if final_delta_state:\n return final_delta_state\n\n\ndef dbgetstate(myid,dbid):\n state_dict = {}\n conn = sqlite3.connect(\"databases/\" + myid + \"/\" + str(dbid), isolation_level=None)\n c = conn.cursor()\n\n seq = c.execute(\"SELECT * FROM SQLITE_SEQUENCE\").fetchall()\n\n for table in table_names:\n state_dict[table] = 0\n\n for table, entry in seq:\n state_dict[table] = entry\n\n conn.close()\n\n\ndef dbexistcheck(myid, dbid):\n try:\n c = sqlite3.connect(\"file:{}?mode=rw\".format(\"databases/\" + myid + \"/\" + str(dbid)), uri=True)\n c.close()\n return True\n except sqlite3.OperationalError:\n return False\n\n\ndef dbentryexist(myid, dbid, table, key):\n conn = sqlite3.connect(\"databases/\" + myid + \"/\" + str(dbid))\n c = conn.cursor()\n\n if not dbcheckqueryparam(table):\n conn.close()\n print(\"Not valid tablename\")\n\n r = c.execute(\"SELECT COUNT(*) FROM %s WHERE _ID = %s\" % (table, key)).fetchall()\n return r[0][0]\n\n\ndef dbcheckqueryparam(param):\n if param == \"android_metadata\":\n return True\n elif param == \"customers\":\n return True\n elif param == \"heaps\":\n return True\n elif param == \"loads\":\n return True\n elif param == \"loads_waybills\":\n return True\n elif param == \"materials\":\n return True\n elif param == \"table_properties\":\n return True\n elif param == \"targets\":\n return True\n elif param == \"waybills\":\n return True\n elif param == \"work_orders\":\n return True\n elif param == \"graveyard\":\n return True\n else:\n return False\n\n\ndef dbgraveyardcheck(myid, dbid, table, key):\n conn = sqlite3.connect(\"databases/\" + myid + \"/\" + str(dbid))\n c = conn.cursor()\n\n if not dbcheckqueryparam(table):\n conn.close()\n print(\"Not valid tablename\")\n\n r = c.execute(\"SELECT COUNT(*) FROM graveyard WHERE c_databaseid='%s' AND c_tablename = '%s' AND c_rowid = '%s'\" % (dbid, table, key)).fetchall()\n #print(\"Graveyard: \", r)\n return r[0][0]\n\n\ndef dbdeleteentry(myid, dbid, table, key):\n #conn = sqlite3.connect(\"databases/\" + myid + \"/\" + str(dbid), isolation_level=None)\n #c = conn.cursor()\n\n if not dbcheckqueryparam(table):\n #conn.close()\n print(\"Not valid tablename\")\n else:\n #c.execute(\"DELETE from %s where _ID = %s\" % (table, str(key)))\n dbaddentry(myid, myid, \"graveyard\", (0, dbid, table, key))\n","sub_path":"rollbackConnect.py","file_name":"rollbackConnect.py","file_ext":"py","file_size_in_byte":11017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"542457464","text":"import numpy as np\nimport sys\n\nnp.set_printoptions(threshold=np.nan, precision=10, suppress=True, linewidth=500)\n_leaders = [(2,15),(5,7),(9,27),(18,29),(25,35)]\n_snakes = [(17,4),(24,16),(20,6),(34,12),(32,30)]\nw0 = np.zeros([1, 36], int)\nif(len(sys.argv) > 1):\n\tk = int(sys.argv[1])\nif(len(sys.argv) > 2):\n\tw0[0][int(sys.argv[2])] = 1\nelse:\n\tw0[0][0] = 1\t#default\n\ndef PSpawer(_leaders, _snakes, tam):\n\t_n = tam[0]*tam[1]\n\tprint('spawing P')\n\tP = np.zeros([36,36], dtype=float)\n\tfor x in range(0, _n):\n\t\tdestiny = [-1, -1]\t#[x+1, x+2]\n\t\tline_zeros = False\n\t\t\n\t\tfor tupla in sorted(_leaders+_snakes):\t#finding sneakes or _leaders\n\t\t\tif(x == tupla[0]-1):\n\t\t\t\tline_zeros = True\n\t\t\t\tbreak \t#zero's line\n\n\t\t\tif(x+1 == tupla[0]-1 and destiny[0] == -1):\t#finding in x+1\n\t\t\t\tdestiny[0] = tupla[1]\n\t\t\tif(x+2 == tupla[0]-1):\t#finding in 
x+2\n\t\t\t\tdestiny[1] = tupla[1]\n\t\t\tif(tupla[0]-1 > x+2):\t#breaking loop to best performance, because the tuples are ordered\n\t\t\t\tbreak\n\n\t\tif(not line_zeros):\n\t\t\tif(destiny[0] != -1):\n\t\t\t\tP[x][destiny[0]-1] = 0.5\n\t\t\telif(x+1 < _n):\n\t\t\t\tP[x][x+1] = 0.5\n\t\t\telse:\t#i am in the last state\n\t\t\t\tP[x][x] = 1\t\n\n\t\t\tif(destiny[1] != -1):\n\t\t\t\tP[x][destiny[1]-1] = 0.5\n\t\t\telif(x+2 < _n):\n\t\t\t\tP[x][x+2] = 0.5\n\t\t\telif(x+1 < _n):\n\t\t\t\tP[x][x+1] = 1\t\t\t\n\tprint(P)\n\treturn P\n\ndef PValidator(P):\n\t#printing transitions\n\tzeros_lines = ()\n\tfor line in range(0,P.shape[0]):\n\t\tconnections = ()\n\t\tfor collum in range(0,P.shape[1]):\n\t\t\tif(P[line][collum] != 0):\n\t\t\t\tconnections += tuple([collum])\n\t\tif(connections == ()):\n\t\t\tprint(str(line + 1) + ': No where')\n\t\t\tzeros_lines += tuple([line])\n\t\telse:\n\t\t\tfor j in connections:\n\t\t\t\tprint('%d: %d' % (line+1, j+1))\n\t#verifing errors\n\tprint('verifing zero lines: ' + str(zeros_lines))\n\tbad_jumper = False\n\tfor line in zeros_lines:\n\t\tfor j in range(0, P.shape[1]):\n\t\t\tif(P[j][line]):\n\t\t\t\tprint('There is an error on position ( %d , %d )' % (j, line))\n\t\t\t\tbad_jumper = True\n\tif(not bad_jumper):\n\t\tprint('There is no bad jumper')\n\n\ndef PoweredPValidator(pP):\n\tvetor = np.zeros([1,36], float)\n\tfor line in range(0, pP.shape[0]):\n\t\tfor collum in range(0, pP.shape[1]):\n\t\t\tvetor[0][line] += pP[line][collum]\n\tprint('Validation:' + str(vetor))\n\ndef PowerMethod(w0, P, k):\n\tprint('spawing P^k')\n\tpoweredP = P\n\tfor i in range(0,k):\n\t\tpoweredP = MultMatrix(poweredP, P)\n\tprint(poweredP)\n\tprint('Validating')\n\tPoweredPValidator(poweredP)\n\tprint('Calculando distribuicao estacionaria')\n\tdist = MultMatrix(w0, poweredP)\n\tprint(dist)\n\treturn dist\n\ndef MultMatrix(a, b):\n\tif(a.shape[1] != b.shape[0]):\t#no possible result\n\t\traise ArithmeticError ('The first matrix\\'s number of colluns must be equal then the second matrix\\' number of lines')\n\telse:\n\t\taXb = np.zeros([a.shape[0], b.shape[1]], int if(a.dtype == int and b.dtype == int)else float)\n\t\tfor line in range(0, a.shape[0]):\n\t\t\tfor collum in range(0, b.shape[1]):\n\t\t\t\tfor jumper in range(b.shape[0]):\n\t\t\t\t\taXb[line][collum] += a[line][jumper]*b[jumper][collum]\n\t\treturn aXb\n\nP = PSpawer(_leaders, _snakes, [6,6])\nPValidator(P)\ndist = PowerMethod(w0, P, k)","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"602841907","text":"\"\"\"\nCreated on Mon Dec 10 10:31:42 2018\n\n@author: semvijverberg\n\"\"\"\n\nimport os, sys\n\n\nif os.path.isdir(\"/Users/semvijverberg/surfdrive/\"):\n basepath = \"/Users/semvijverberg/surfdrive/\"\n data_base_path = basepath\nelse:\n basepath = \"/home/semvij/\"\n data_base_path = \"/p/projects/gotham/semvij/\"\n \nos.chdir(os.path.join(basepath, 'Scripts/CPPA_vs_PEP/'))\nscript_dir = os.getcwd()\nsys.path.append(script_dir)\nif sys.version[:1] == '3':\n from importlib import reload as rel\n\nimport numpy as np\nimport xarray as xr \nimport pandas as pd\n#import cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n#import scipy\nimport func_PEP\nimport load_data\n\nfrom ROC_score import plotting_timeseries\n\n\n\n\ndatafolder = 'NOAA'\npath_pp = os.path.join(data_base_path, 'Data_'+datafolder +'/input_pp') # path to netcdfs\nif os.path.isdir(path_pp) == False: 
os.makedirs(path_pp)\n\n\n# =============================================================================\n# General Settings\n# =============================================================================\nex = {'datafolder' : datafolder,\n 'grid_res' : 1.0,\n 'startyear' : 1982,\n 'endyear' : 2017,\n 'path_pp' : path_pp,\n 'startperiod' : '06-24', #'1982-06-24',\n 'endperiod' : '08-22', #'1982-08-22',\n 'figpathbase' : os.path.join(basepath, 'McKinRepl/'),\n 'RV1d_ts_path' : os.path.join(basepath, 'MckinRepl/RVts'),\n 'RVts_filename': 'T95_Bram_mcK.npy',\n 'RV_name' : 'T95',\n 'name' : 'sst_NOAA',\n 'add_lsm' : False,\n 'region' : 'Northern',\n 'regionmcK' : 'PEPrectangle',\n 'lags' : [0, 15, 30, 50], #[0, 5, 10, 15, 20, 30, 40, 50, 60], #[5, 15, 30, 50] #[10, 20, 30, 50] \n 'plot_ts' : True,\n }\n# =============================================================================\n# Settings for event timeseries\n# =============================================================================\nex['tfreq'] = 1\nex['max_break'] = 0 \nex['min_dur'] = 1\nex['event_percentile'] = 'std'\n# =============================================================================\n# Settins for PEP\n# =============================================================================\nex['load_mcK'] = '1bram' # or '1bram' for extended or '1' for mcK ts\nex['filename_precur'] = '{}_{}-{}_1jan_31dec_daily_{}deg.nc'.format(\n ex['name'], ex['startyear'], ex['endyear'], ex['grid_res'])\nex['rollingmean'] = ('CPPA', 1)\n# =============================================================================\n# Settings for validation \n# =============================================================================\nex['leave_n_out'] = True\nex['ROC_leave_n_out'] = False\nex['method'] = 'iter' #'iter' or 'no_train_test_split' or split#8 or random3 \nex['n_boot'] = 0\n# =============================================================================\n# load data (write your own function load_data(ex) )\n# =============================================================================\n\nRV_ts, Prec_reg, ex = load_data.load_data(ex)\n\n\nex['exppathbase'] = '{}_PEP_{}_{}_{}'.format(datafolder, ex['RV_name'],ex['name'],\n ex['regionmcK'])\nex['figpathbase'] = os.path.join(ex['figpathbase'], ex['exppathbase'])\nif os.path.isdir(ex['figpathbase']) == False: os.makedirs(ex['figpathbase'])\n\n\nprint_ex = ['RV_name', 'name', 'max_break',\n 'min_dur', 'grid_res', 'startyear', 'endyear', \n 'startperiod', 'endperiod', 'leave_n_out',\n 'n_oneyr', 'method', 'ROC_leave_n_out',\n 'tfreq', 'lags', 'n_yrs', \n 'rollingmean', 'event_percentile',\n 'event_thres', \n 'region', 'regionmcK',\n 'add_lsm', 'n_boot']\n\ndef printset(print_ex=print_ex, ex=ex):\n max_key_len = max([len(i) for i in print_ex])\n for key in print_ex:\n key_len = len(key)\n expand = max_key_len - key_len\n key_exp = key + ' ' * expand\n printline = '\\'{}\\'\\t\\t{}'.format(key_exp, ex[key])\n print(printline)\n\n\nprintset()\nn = 1\n#%% Run code with ex settings\n\n\n\nl_ds_PEP, ex = func_PEP.main(RV_ts, Prec_reg, ex)\n\n\noutput_dic_folder = ex['output_dic_folder']\n\n\n# save ex setting in text file\n\nif os.path.isdir(output_dic_folder):\n answer = input('Overwrite?\\n{}\\ntype y or n:\\n\\n'.format(output_dic_folder))\n if 'n' in answer:\n assert (os.path.isdir(output_dic_folder) != True)\n elif 'y' in answer:\n pass\n\nif os.path.isdir(output_dic_folder) != True : os.makedirs(output_dic_folder)\n\n# save output in numpy dictionary\nfilename = 'output_main_dic'\nif 
os.path.isdir(output_dic_folder) != True : os.makedirs(output_dic_folder)\nto_dict = dict( { 'ex' : ex,\n 'l_ds_PEP' : l_ds_PEP} )\nnp.save(os.path.join(output_dic_folder, filename+'.npy'), to_dict) \n\n# write output in textfile\nprint_ex.append('output_dic_folder')\ntxtfile = os.path.join(output_dic_folder, 'experiment_settings.txt')\nwith open(txtfile, \"w\") as text_file:\n max_key_len = max([len(i) for i in print_ex])\n for key in print_ex:\n key_len = len(key)\n expand = max_key_len - key_len\n key_exp = key + ' ' * expand\n printline = '\\'{}\\'\\t\\t{}'.format(key_exp, ex[key])\n print(printline)\n print(printline, file=text_file)\n\n\n\n#%% Generate output in console\n\n\n\nfilename = 'output_main_dic'\ndic = np.load(os.path.join(output_dic_folder, filename+'.npy'), encoding='latin1').item()\n\n# load settings\nex = dic['ex']\nl_ds_PEP = dic['l_ds_PEP']\n\n\nex['n_boot'] = 0\n\n# write output in textfile\npredict_folder = 'PEP'\nex['exp_folder'] = os.path.join(ex['CPPA_folder'], predict_folder)\npredict_folder = os.path.join(ex['figpathbase'], ex['exp_folder'])\nif os.path.isdir(predict_folder) != True : os.makedirs(predict_folder)\n\ntxtfile = os.path.join(predict_folder, 'experiment_settings.txt')\nwith open(txtfile, \"w\") as text_file:\n max_key_len = max([len(i) for i in print_ex])\n for key in print_ex:\n key_len = len(key)\n expand = max_key_len - key_len\n key_exp = key + ' ' * expand\n printline = '\\'{}\\'\\t\\t{}'.format(key_exp, ex[key])\n print(printline, file=text_file)\n print(printline)\n\n\n# =============================================================================\n# perform prediciton \n# =============================================================================\n\n\n#ex, l_ds_CPPA = func_pred.make_prediction(l_ds_CPPA, l_ds_PEP, Prec_reg, ex)\n\nex['exp_folder'] = os.path.join(ex['CPPA_folder'], 'PEP')\nex = func_PEP.only_spatcov_wrapper(l_ds_PEP, RV_ts, Prec_reg, ex)\n\n\nscore_AUC = np.round(ex['score'][-1][0], 2)\nROC_str = ['{} days - ROC score {}'.format(ex['lags'][i], score_AUC[i]) for i in range(len(ex['lags'])) ]\nROC_boot = [np.round(np.percentile(ex['score'][-1][1][i],99), 2) for i in range(len(ex['lags']))]\n\nex['score_AUC'] = score_AUC\nex['ROC_boot_99'] = ROC_boot\n\nfilename = 'output_main_dic'\nto_dict = dict( { 'ex' : ex,\n 'l_ds_PEP' : l_ds_PEP} )\nnp.save(os.path.join(output_dic_folder, filename+'.npy'), to_dict) \n\n#%%\n# =============================================================================\n# Plotting\n# =============================================================================\n\n\nPrec_mcK = func_PEP.find_region(Prec_reg, region=ex['region'])[0][0]\nlats = Prec_mcK.latitude\nlons = Prec_mcK.longitude\narray = np.zeros( (ex['n_conv'], len(ex['lags']), len(lats), len(lons)) )\npatterns_mcK = xr.DataArray(data=array, coords=[range(ex['n_conv']), ex['lags'], lats, lons], \n dims=['n_tests', 'lag','latitude','longitude'], \n name='{}_tests_patterns_mcK'.format(ex['n_conv']), attrs={'units':'Kelvin'})\n\nfor n in range(len(ex['train_test_list'])):\n ex['n'] = n\n\n \n if (ex['method'][:6] == 'random'):\n if n == ex['n_conv']:\n # remove empty n_tests\n patterns_mcK = patterns_mcK.sel(n_tests=slice(0,ex['n_conv']))\n ex['n_conv'] = ex['n_conv']\n \n patterns_mcK[n,:,:,:] = l_ds_PEP[n]['pattern'].sel(lag=ex['lags'])\n\n\n \nkwrgs = dict( {'title' : '', 'clevels' : 'notdefault', 'steps':17,\n 'vmin' : -0.4, 'vmax' : 0.4, 'subtitles' : ROC_str,\n 'cmap' : plt.cm.RdBu_r, 'column' : 1,\n 'cbar_vert' : 0.02, 'cbar_hght' : -0.01,\n 
'adj_fig_h' : 0.9, 'adj_fig_w' : 1., \n 'hspace' : 0.2, 'wspace' : 0.08,\n 'title_h' : 0.95} )\n\n# mcKinnon composite mean plot\nkwrgs['drawbox'] = True\nfilename = os.path.join(ex['exp_folder'], '{}_PEP_mean_composite_tf{}_{}'.format(\n ex['datafolder'], ex['tfreq'], ex['lags']))\nmcK_mean = patterns_mcK.mean(dim='n_tests')\nmcK_mean.name = 'Composite mean green rectangle'\nmcK_mean.attrs['units'] = 'Kelvin'\nmcK_mean.attrs['title'] = 'Composite mean'\nimport func_CPPA\nfunc_CPPA.plotting_wrapper(mcK_mean, ex, filename, kwrgs=kwrgs)\n\n\n\n#%% Plot time series events:\nfolder = '/Users/semvijverberg/Downloads/'\nfunc_PEP.plot_oneyr_events_allRVts(ex, 2012, folder, saving=True)\n\n\n \n \n#%%\nif ex['load_mcK'] == False:\n filename = os.path.join(ex['RV1d_ts_path'], ex['RVts_filename'])\n dicRV = np.load(filename, encoding='latin1').item()\n folder = os.path.join(ex['figpathbase'], ex['exp_folder'])\n xarray_plot(dicRV['RV_array']['mask'], path=folder, name='RV_mask', saving=True)\n \nfunc_CPPA.plot_oneyr_events(RV_ts, ex, 2012, ex['output_dic_folder'], saving=True)\n## plotting same figure as in paper\n#for i in range(2005, 2010):\n# func_CPPA.plot_oneyr_events(RV_ts, ex, i, folder, saving=True)\n\n\n\n\n#%% Plotting prediciton time series vs truth:\nyrs_to_plot = [1985, 1990, 1995, 2004, 2007, 2012, 2015]\n#yrs_to_plot = list(np.arange(ex['startyear'],ex['endyear']+1))\ntest = ex['train_test_list'][0][1] \nplotting_timeseries(test, yrs_to_plot, ex) \n\n\n\n\n\n\n\n","sub_path":"main_PEP_NOAA.py","file_name":"main_PEP_NOAA.py","file_ext":"py","file_size_in_byte":10146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"377605095","text":"import re\r\nfrom functools import wraps\r\nfrom .urls import Urls\r\nfrom .utils.request import Session\r\nfrom .errors import MixtapesError\r\nfrom .frontend.display import Print,Verbose\r\nfrom .backend.config import User,Datatype\r\nfrom .backend.mixsetup import Pages\r\n\r\n\r\nclass Mixtapes(object):\r\n category = list(Urls.category)\r\n\r\n def __new__(cls, *args, **kwargs):\r\n return super(Mixtapes, cls).__new__(cls)\r\n\r\n\r\n def __init__(self, category=None,search=None,*args,**kwargs):\r\n \"\"\"\r\n Mixtapes Initialization.\r\n\r\n :param: category - name of the catgeory to search from.\r\n see Mixtapes.category\r\n\r\n :param: search - search for an artist or mixtape's name\r\n \"\"\"\r\n super(Mixtapes,self).__init__(*args,**kwargs)\r\n self._session = Session()\r\n self._session.clear_cache()\r\n self._Start(category,search)\r\n self._setup()\r\n\r\n\r\n def __str__(self):\r\n category = \"'new','hot','top','celebrated'\"\r\n return \"%s(category) argument: category --> %s\"%(self.__class__.__name__, category)\r\n\r\n\r\n def __repr__(self):\r\n return \"%s('hot')\"%self.__class__.__name__\r\n\r\n\r\n def __len__(self):\r\n if hasattr(self,'_artists'):\r\n return len(self._artists)\r\n else:\r\n return 0\r\n\r\n\r\n def search(self,name):\r\n \"\"\"\r\n Search for an artist or mixtape's name.\r\n\r\n :param: name - name of an artist or mixtapes name \r\n \"\"\"\r\n name = str(name).strip()\r\n if not name: \r\n return \r\n\r\n Verbose('\\nSearching for %s mixtapes ...'%name.title())\r\n url = Urls.url['search']\r\n return self._session.method('POST',url,data=Urls.payload(name))\r\n\r\n\r\n def _Start(self,category='hot', search=None):\r\n \"\"\"\r\n Initial setup. 
Gets all available mixtapes.\r\n \r\n :param: category - name of the catgeory to search from.\r\n (self Mixtapes.category)\r\n :param: search - search for an artist or mixtape's name\r\n \"\"\"\r\n if search: # Search for an artist \r\n body = self.search(search)\r\n if not body or body is None: # on failure return response from a category \r\n return self._Start('hot')\r\n else: # Select from category instead of searching \r\n select = User.choice_is_str\r\n choice = select(category,Urls.category) or select('hot',Urls.category)\r\n body = self._session.method('GET',choice)\r\n self._responses = body\r\n return body\r\n\r\n \r\n def _setup(self):\r\n \"\"\"Initial class variable and set theirattributes on page load up.\r\n \"\"\"\r\n # all method below are property setter method \r\n # each \"re string\" get pass to the corresponding html response text\r\n\r\n self.artists = '
(.*[.\\w\\s]*)
'\r\n self.mixtapes = '\"\\stitle\\=\"listen to ([^\"]*)\">[\\r\\n\\t\\s]?.*img'\r\n self.links = 'title\"> conf:\n idx = int(detections[0, 0, i, 1])\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n label = LABELS[idx]\n cv2.rectangle(frame, (startX, startY), (endX, endY),\n COLORS[idx], 2)\n y = startY - 15 if startY - 15 > 15 else startY + 15\n\n if label == this_label:\n client.publish(mqtt_topic, mqtt_message)\n\n cv2.putText(frame, label, (startX, y),font, 0.5, COLORS[idx], 2)\n cv2.putText(frame,\"FPS: {0:.2f}\".format(frame_rate_calc),(30,50),font,1,(255,255,0),2,cv2.LINE_AA)\n\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n t2 = cv2.getTickCount()\n time1 = (t2-t1)/freq\n frame_rate_calc = 1/time1\n\n if key == ord(\"q\"):\n break\n fps.update()\n\ncv2.destroyAllWindows()\nvs.stop()\n","sub_path":"opencv/mobilenet-ssd-python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"464822528","text":"#! /usr/bin/env python\n\nimport argparse\nimport json\nimport urllib2\nimport sys\n\n\ndef getPopulation(locale, t_name, t_data):\n # Extract population for locale from territory data\n t_population = t_data['_population']\n\n if ('languagePopulation' in t_data and\n locale in t_data['languagePopulation'] and\n '_populationPercent' in t_data['languagePopulation'][locale]):\n percentage = t_data['languagePopulation'][locale]['_populationPercent']\n l_population = int(round(float(percentage)/100 * int(t_population)))\n print('Adding {}: {} ({}% of {})'.format(t_name, l_population, percentage, t_population))\n return l_population\n else:\n return -1\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('locale', help='Locale code to search for')\n args = parser.parse_args()\n locale = args.locale\n\n # Get CLDR data\n cldr_source = 'https://raw.githubusercontent.com/unicode-cldr/cldr-core/master/supplemental/territoryInfo.json'\n cldr_data = json.load(urllib2.urlopen(cldr_source))\n\n # Initialize data\n population = 0\n territories = []\n\n # If the locale code has a region, only check that territory\n if '-' in locale:\n locale, territory = locale.split('-')\n if not territory in cldr_data['supplemental']['territoryInfo']:\n print('Region {} is not available in CLDR'.format(territory))\n sys.exit(0)\n territories.append(territory)\n population = getPopulation(locale, territory, cldr_data['supplemental']['territoryInfo'][territory])\n else:\n for territory, territory_data in cldr_data['supplemental']['territoryInfo'].iteritems():\n l_population = getPopulation(locale, territory, territory_data)\n if l_population == -1:\n # Territory is not defined\n continue\n population += l_population\n territories.append(territory)\n\n if territories:\n territories.sort()\n print('--------')\n print('Territories: {}'.format(', '.join(territories)))\n print('Population: {}'.format(population))\n print('--------')\n else:\n print('{} is not available in CLDR'.format(locale))\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/cldr_population.py","file_name":"cldr_population.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"488018889","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Main module.\"\"\"\nimport requests\nimport sseclient\nimport json\nimport os\nfrom time import 
sleep\n\n\nclass Client:\n HEADERS = {'Content-Type': 'Application/json', 'Accept': 'Application/json'}\n BASE_URL = 'https://bot.sapp.ir/'\n GET_MESSAGE_URL = '/getMessage'\n SEND_MESSAGE_URL = '/sendMessage'\n DOWNLOAD_FILE_URL = '/downloadFile/'\n UPLOAD_FILE_URL = '/uploadFile'\n RETRY_DELAY = 10\n\n def __init__(self, token):\n self.token = token\n\n def get_upload_file_url(self):\n if not self.token:\n raise ValueError('Invalid bot token')\n\n return self.BASE_URL + self.token + self.UPLOAD_FILE_URL\n\n def get_download_file_url(self, file_url):\n if not self.token:\n raise ValueError('Invalid bot token')\n if not file_url:\n raise ValueError('Invalid file url')\n\n return self.BASE_URL + self.token + self.DOWNLOAD_FILE_URL + file_url\n\n def get_messages(self):\n if not self.token:\n raise ValueError('Invalid bot token')\n\n url = self.BASE_URL + self.token + self.GET_MESSAGE_URL\n\n while True:\n response = requests.get(url, stream=True)\n if 'Content-Type' in response.headers:\n client = sseclient.SSEClient(response)\n\n for event in client.events():\n try:\n message_event = json.loads(event.data)\n yield message_event\n except Exception as e:\n print(e.args[0])\n continue\n else:\n print('Invalid bot token OR Invalid connection response from server')\n\n sleep(self.RETRY_DELAY)\n\n def send_message(self, post_data):\n if not self.token:\n raise ValueError('Invalid bot token')\n\n url = self.BASE_URL + self.token + self.SEND_MESSAGE_URL\n\n post_data = json.dumps(post_data, separators=(',', ':'))\n\n try:\n response = requests.post(url, post_data, headers=self.HEADERS)\n\n if response:\n response_json = json.loads(response.text)\n if 'resultCode' in response_json:\n if response_json['resultCode'] == 200:\n return [False, 'OK']\n else:\n if 'resultMessage' in response_json:\n return [response_json['resultMessage'], False]\n else:\n return ['Unknown Error', False]\n else:\n return ['Invalid Response', False]\n else:\n return ['Invalid Request', False]\n except Exception as e:\n return [e.args[0], False]\n\n def send_text(self, to, text, keyboard=None):\n\n post_data = {\n 'type': 'TEXT',\n 'to': to,\n 'body': text,\n }\n if keyboard is not None:\n post_data['keyboard'] = keyboard\n return self.send_message(post_data)\n\n def send_file(self, to, body, file_name, file_type, file_url, file_size, extra_params={}):\n post_data = {\n 'to': to,\n 'body': body,\n 'type': 'FILE',\n 'fileName': file_name,\n 'fileType': file_type,\n 'fileUrl': file_url,\n 'fileSize': file_size\n }\n\n for key, value in extra_params.items():\n post_data[key] = value\n\n return self.send_message(post_data)\n\n def send_image(self, to, image_file_url, image_file_name, image_file_size, image_width=0,\n image_height=0, thumbnail_file_url=None, caption='', keyboard=None):\n\n image_file_type = 'IMAGE'\n extra_params = {\n 'imageWidth': 0,\n 'imageHeight': 0,\n 'thumbnailUrl': ''\n }\n\n if int(image_width) and int(image_height):\n extra_params['imageWidth'] = int(image_width)\n extra_params['imageHeight'] = int(image_height)\n if thumbnail_file_url:\n extra_params['thumbnailUrl'] = str(thumbnail_file_url)\n if keyboard is not None:\n extra_params['keyboard'] = keyboard\n\n return self.send_file(to, caption, image_file_name, image_file_type, image_file_url, image_file_size,\n extra_params)\n\n def send_gif(self, to, image_file_url, image_file_name, image_file_size, image_width=0,\n image_height=0, thumbnail_file_url=None, caption='', keyboard=None):\n\n gif_file_type = 'GIF'\n extra_params = {\n 'imageWidth': 0,\n 
'imageHeight': 0,\n 'thumbnailUrl': ''\n }\n\n if int(image_width) and int(image_height):\n extra_params['imageWidth'] = int(image_width)\n extra_params['imageHeight'] = int(image_height)\n if thumbnail_file_url:\n extra_params['thumbnailUrl'] = str(thumbnail_file_url)\n if keyboard is not None:\n extra_params['keyboard'] = keyboard\n\n return self.send_file(to, caption, image_file_name, gif_file_type, image_file_url, image_file_size,\n extra_params)\n\n def send_video(self, to, video_file_url, video_file_name, video_file_size, video_duration_in_milliseconds,\n video_width=0, video_height=0, thumbnail_file_url=None, caption='', keyboard=None):\n\n video_file_type = 'VIDEO'\n extra_params = {\n 'thumbnailWidth': 0,\n 'thumbnailHeight': 0,\n 'thumbnailUrl': '',\n 'fileDuration': video_duration_in_milliseconds\n }\n\n if int(video_width) and int(video_height):\n extra_params['imageWidth'] = int(video_width)\n extra_params['imageHeight'] = int(video_height)\n if thumbnail_file_url:\n extra_params['thumbnailUrl'] = str(thumbnail_file_url)\n if keyboard is not None:\n extra_params['keyboard'] = keyboard\n\n return self.send_file(to, caption, video_file_name, video_file_type, video_file_url, video_file_size,\n extra_params)\n\n def send_voice(self, to, voice_file_url, voice_file_name, voice_file_size, voice_duration_in_milliseconds,\n caption='', keyboard=None):\n\n voice_file_type = 'PUSH_TO_TALK'\n extra_params = {\n 'fileDuration': voice_duration_in_milliseconds\n }\n\n if keyboard is not None:\n extra_params['keyboard'] = keyboard\n\n return self.send_file(to, caption, voice_file_name, voice_file_type, voice_file_url, voice_file_size,\n extra_params)\n\n def send_location(self, to, latitude, longitude, caption='', keyboard=None):\n\n post_data = {\n 'type': 'LOCATION',\n 'latitude': latitude,\n 'longitude': longitude,\n 'to': to,\n 'body': caption\n }\n\n if keyboard is not None:\n post_data['keyboard'] = keyboard\n\n return self.send_message(post_data)\n\n def send_attachment(self, to, file_url, file_name, file_size, caption='', keyboard=None):\n\n file_type = 'ATTACHMENT'\n extra_params = {}\n\n if keyboard is not None:\n extra_params['keyboard'] = keyboard\n\n return self.send_file(to, caption, file_name, file_type, file_url, file_size, extra_params)\n\n def change_keyboard(self, to, keyboard):\n\n post_data = {\n 'type': 'CHANGE',\n 'keyboard': keyboard,\n 'to': to\n }\n\n return self.send_message(post_data)\n\n @staticmethod\n def make_keyboard(keyboard_data):\n keyboard = []\n\n if isinstance(keyboard_data, str):\n rows = keyboard_data.split('\\n')\n for row in rows:\n row_keyboard = []\n row_buttons = row.split('|')\n for button in row_buttons:\n if button == '':\n continue\n row_keyboard.append(\n {\n 'text': button,\n 'command': button\n }\n )\n if row_keyboard:\n keyboard.append(row_keyboard)\n\n elif isinstance(keyboard_data, list):\n for row_data in keyboard_data:\n row_keyboard = []\n for row_button_data in row_data:\n button_data = []\n if isinstance(row_button_data, str):\n button_data = {\n 'text': row_button_data,\n 'command': row_button_data\n }\n elif isinstance(row_button_data, list):\n if len(row_button_data) == 1:\n button_data = {\n 'text': row_button_data[0],\n 'command': row_button_data[0]\n }\n elif len(row_button_data) == 2:\n button_data = {\n 'text': row_button_data[0],\n 'command': row_button_data[1]\n }\n elif isinstance(row_button_data, dict):\n if 'text' in row_button_data:\n if 'command' in row_button_data:\n button_data = {\n 'text': 
row_button_data['text'],\n 'command': row_button_data['command']\n }\n else:\n button_data = {\n 'text': row_button_data['text'],\n 'command': row_button_data['text']\n }\n\n if len(button_data):\n row_keyboard.append(button_data)\n\n if len(row_keyboard):\n keyboard.append(row_keyboard)\n\n return keyboard\n\n def download_file(self, file_url, save_file_path):\n if not self.token:\n raise ValueError('Invalid bot token')\n\n if not save_file_path:\n raise ValueError('Invalid path for saving file')\n\n if not file_url:\n raise ValueError('Invalid file url')\n\n try:\n response = requests.get(self.get_download_file_url(file_url))\n\n if response.status_code == 200:\n try:\n response_json = json.loads(response.text)\n return [response_json['resultMessage'], False]\n except:\n pass\n with open(save_file_path, 'wb') as file:\n file.write(response.content)\n return [False, save_file_path]\n else:\n return ['Bad Response: ' + str(response.status_code) + ' status code', False]\n\n except Exception as e:\n return [e.args[0], False]\n\n def upload_file(self, file_path):\n if not os.path.isfile(file_path):\n raise ValueError('Invalid file')\n\n try:\n file = {'file': open(file_path, 'rb')}\n response = requests.post(self.get_upload_file_url(), files=file)\n\n if response.status_code == 200:\n if response:\n response_json = json.loads(response.text)\n if 'resultCode' in response_json:\n if response_json['resultCode'] == 200:\n if 'fileUrl' in response_json:\n if response_json['fileUrl']:\n return [False, response_json['fileUrl']]\n return [\"Unknown Upload Error\", False]\n else:\n if 'resulMessage' in response_json:\n return [response_json['resultMessage'], False]\n else:\n return ['Unknown Error', False]\n else:\n return [\"Invalid Response\", False]\n else:\n return [\"Bad Response\", False]\n else:\n return ['Bad Response: ' + str(response.status_code) + ' status code', False]\n except Exception as e:\n return [e.args[0], False]\n","sub_path":"soroush_python_sdk/soroush_python_sdk.py","file_name":"soroush_python_sdk.py","file_ext":"py","file_size_in_byte":12541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"210918141","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL import GLX\n\nimport pyopencl as cl\nimport sys\nimport numpy\n\nclass Particles(object):\n def __init__(self, num, dt, *args, **kwargs):\n self.clinit()\n self.loadProgram(\"cornell.cl\");\n self.totaltime = 0.0\n self.num = num\n self.num_cl = numpy.uint32(num)\n self.dt = numpy.float32(dt)\n\n\n\n\n def loadData(self, pos_vbo, col_vbo, vel):\n import pyopencl as cl\n mf = cl.mem_flags\n self.pos_vbo = pos_vbo\n self.col_vbo = col_vbo\n\n self.pos = pos_vbo.data\n self.col = col_vbo.data\n self.vel = vel\n\n #Setup vertex buffer objects and share them with OpenCL as GLBuffers\n self.pos_vbo.bind()\n #For some there is no single buffer but an array of buffers\n #https://github.com/enjalot/adventures_in_opencl/commit/61bfd373478767249fe8a3aa77e7e36b22d453c4\n try:\n self.pos_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.pos_vbo.buffer))\n self.col_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.col_vbo.buffer))\n except AttributeError:\n self.pos_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.pos_vbo.buffers[0]))\n self.col_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.col_vbo.buffers[0]))\n self.col_vbo.bind()\n\n #pure OpenCL arrays\n self.vel_cl = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=vel)\n self.pos_gen_cl = 
cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.pos)\n self.vel_gen_cl = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.vel)\n\n self.queue.finish()\n\n # set up the list of GL objects to share with opencl\n self.gl_objects = [self.pos_cl, self.col_cl]\n \n\n\n def execute(self, sub_intervals):\n cl.enqueue_acquire_gl_objects(self.queue, self.gl_objects)\n\n global_size = (self.num,)\n local_size_threads = 33 # group size\n\n for i in range(1,64): # choose group size\n if (self.num % i == 0) :\n local_size_threads = i\n\n\n local_size = (local_size_threads,)\n # pos_shared = cl.LocalMemory(4 * local_size_threads)\n # col_shared = cl.LocalMemory(4 * local_size_threads)\n\n kernelargs = (self.pos_cl, \n self.vel_cl,\n self.pos_gen_cl,\n self.vel_gen_cl,\n self.col_cl, \n self.dt,\n self.num_cl)\n\n kernelargsT = (self.pos_gen_cl, \n self.vel_gen_cl,\n self.pos_cl,\n self.vel_cl,\n self.col_cl, \n self.dt,\n self.num_cl)\n for i in xrange(0, sub_intervals):\n self.program.nbody(self.queue, global_size, local_size, *(kernelargs))\n self.program.nbody(self.queue, global_size, local_size, *(kernelargsT)) # change role of kernelargs to do double buffered calc\n cl.enqueue_release_gl_objects(self.queue, self.gl_objects)\n self.queue.finish()\n self.totaltime += 2*self.dt\n sys.stdout.write(\"\\rT = {0} fm/c>\".format(self.totaltime))\n sys.stdout.flush()\n \n\n \n def clinit(self):\n plats = cl.get_platforms()\n from pyopencl.tools import get_gl_sharing_context_properties\n self.ctx = cl.Context(properties=get_gl_sharing_context_properties(),\n devices=[])\n self.queue = cl.CommandQueue(self.ctx)\n\n def loadProgram(self, filename):\n #read in the OpenCL source file as a string\n f = open(filename, 'r')\n fstr = \"\".join(f.readlines())\n #print fstr\n #create the program\n self.program = cl.Program(self.ctx, fstr).build()\n\n\n def render(self):\n \n glEnable(GL_POINT_SMOOTH)\n glPointSize(2)\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n #setup the VBOs\n self.col_vbo.bind()\n glColorPointer(4, GL_FLOAT, 0, self.col_vbo)\n\n self.pos_vbo.bind()\n glVertexPointer(4, GL_FLOAT, 0, self.pos_vbo)\n\n glEnableClientState(GL_VERTEX_ARRAY)\n glEnableClientState(GL_COLOR_ARRAY)\n #draw the VBOs\n glDrawArrays(GL_POINTS, 0, self.num)\n\n glDisableClientState(GL_COLOR_ARRAY)\n glDisableClientState(GL_VERTEX_ARRAY)\n\n glDisable(GL_BLEND)\n \n\n","sub_path":"physics.py","file_name":"physics.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"325146234","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport math\nimport numpy as np\nimport random\nfrom tqdm import trange\n\nfrom io import BytesIO\nfrom urllib.request import urlopen\nfrom zipfile import ZipFile\n\nfrom math import sqrt\nfrom pandas import read_csv, DataFrame\nfrom scipy import stats\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\n\nimport argparse\nimport utils\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='elect', help='Name of the dataset')\nparser.add_argument('--data-folder', default='data', help='Parent dir of the dataset')\nparser.add_argument('--model-name', default='base_model_24', help='Directory containing params.json')\nparser.add_argument('--file-name', 
default='electricity.csv', help='Directory containing data.csv')\nparser.add_argument('--test', action='store_true', help='Whether to use test set for validation') # default=False\nparser.add_argument('--hop', action='store_true', help='Whether to use test set for validation') # default=False\n\n\n\ndef prep_data(data, covariates, data_start, train = True):\n #print(\"train: \", train)\n time_len = data.shape[0]\n #print(\"time_len: \", time_len)\n input_size = window_size-stride_size\n windows_per_series = np.full((num_series), (time_len-input_size) // stride_size)\n #print(\"windows pre: \", windows_per_series.shape)\n if train: windows_per_series -= (data_start+stride_size-1) // stride_size\n #print(\"data_start: \", data_start.shape)\n #print(data_start)\n #print(\"windows: \", windows_per_series.shape)\n #print(windows_per_series)\n total_windows = np.sum(windows_per_series)\n x_input = np.zeros((total_windows, window_size, 1 + num_covariates + 1), dtype='float32')\n label = np.zeros((total_windows, window_size), dtype='float32')\n v_input = np.zeros((total_windows, 2), dtype='float32')\n #cov = 3: ground truth + age + day_of_week + hour_of_day + num_series\n #cov = 4: ground truth + age + day_of_week + hour_of_day + month_of_year + num_series\n count = 0\n if not train:\n covariates = covariates[-time_len:]\n for series in trange(num_series):\n cov_age = stats.zscore(np.arange(total_time-data_start[series]))\n if train:\n covariates[data_start[series]:time_len, 0] = cov_age[:time_len-data_start[series]]\n else:\n covariates[:, 0] = cov_age[-time_len:]\n for i in range(windows_per_series[series]):\n if train:\n window_start = stride_size*i+data_start[series]\n else:\n window_start = stride_size*i\n window_end = window_start+window_size\n '''\n print(\"x: \", x_input[count, 1:, 0].shape)\n print(\"window start: \", window_start)\n print(\"window end: \", window_end)\n print(\"data: \", data.shape)\n print(\"d: \", data[window_start:window_end-1, series].shape)\n '''\n x_input[count, 1:, 0] = data[window_start:window_end-1, series]\n x_input[count, :, 1:1+num_covariates] = covariates[window_start:window_end, :]\n x_input[count, :, -1] = series\n label[count, :] = data[window_start:window_end, series]\n nonzero_sum = (x_input[count, 1:input_size, 0]!=0).sum()\n if nonzero_sum == 0:\n v_input[count, 0] = 0\n else:\n v_input[count, 0] = np.true_divide(x_input[count, 1:input_size, 0].sum(),nonzero_sum)+1\n x_input[count, :, 0] = x_input[count, :, 0]/v_input[count, 0]\n if train:\n label[count, :] = label[count, :]/v_input[count, 0]\n count += 1\n prefix = os.path.join(save_path, 'train_' if train else 'test_')\n np.save(prefix+'data_'+save_name, x_input)\n np.save(prefix+'v_'+save_name, v_input)\n np.save(prefix+'label_'+save_name, label)\n\ndef gen_covariates(times, num_covariates):\n covariates = np.zeros((times.shape[0], num_covariates))\n for i, input_time in enumerate(times):\n covariates[i, 1] = input_time.weekday()\n covariates[i, 2] = input_time.hour\n covariates[i, 3] = input_time.month\n for i in range(1,num_covariates):\n covariates[:,i] = stats.zscore(covariates[:,i])\n return covariates[:, :num_covariates]\n\ndef visualize(data, week_start):\n x = np.arange(window_size)\n f = plt.figure()\n plt.plot(x, data[week_start:week_start+window_size], color='b')\n f.savefig(\"visual.png\")\n plt.close()\n\nif __name__ == '__main__':\n\n global save_path\n # Load the parameters from json file\n args = parser.parse_args()\n model_dir = os.path.join('experiments', args.model_name)\n 
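    # (annotation) params.json, loaded via utils.Params just below, is expected to define train_window and predict_steps; they set window_size and stride_size for the windowing step.\n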
json_path = os.path.join(model_dir, 'params.json')\n #data_dir = os.path.join('data', args.dataset)\n assert os.path.isfile(json_path), f'No json configuration file found at {json_path}'\n params = utils.Params(json_path)\n\n\n name = args.file_name #'electricity.csv'\n save_name = args.dataset #'elect'\n window_size = params.train_window #192\n stride_size = params.predict_steps #24\n num_covariates = 4\n\n #test = False\n # print('args.test: ', args.test)\n\n if not args.test:\n train_start = '2012-01-01 00:00:00'\n train_end = '2013-10-19 23:00:00' #'2014-06-30 23:00:00'\n test_start = '2013-10-13 00:00:00' #need additional 7 days as given info '2014-06-24 00:00:00'\n test_end = '2014-05-26 23:00:00' #2014-12-28\n else:\n train_start = '2012-01-01 00:00:00'\n train_end = '2013-10-19 23:00:00' #'2014-06-30 23:00:00'\n test_start = '2014-05-20 00:00:00' #need additional 7 days as given info '2014-06-24 00:00:00'\n test_end = '2014-12-31 23:00:00' #2014-12-28\n\n if args.hop:\n train_start = '2012-01-01 00:00:00'\n train_end = '2012-04-30 23:00:00' #'2014-06-30 23:00:00'\n test_start = '2012-04-24 00:00:00' #need additional 7 days as given info '2014-06-24 00:00:00'\n test_end = '2012-05-31 23:00:00' #2014-12-28\n\n pred_days = 7\n given_days = 7\n\n save_path = model_dir # os.path.join('data', save_name)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n csv_path = os.path.join(args.data_folder, save_name, name) #os.path.join(save_path, name)\n\n data_frame = pd.read_csv(csv_path, sep=\",\", index_col=0, parse_dates=True, decimal=',').astype(float)\n #data_frame = data_frame.resample('1H',label = 'left',closed = 'right').sum()[train_start:test_end]\n data_frame.fillna(0, inplace=True)\n # data_start = (data_frame!=0).argmax(axis=0) #find first nonzero value in each time series ########### added\n #data_frame = data_frame.drop(data_frame.columns[data_start>=161], axis=1) ########### added\n covariates = gen_covariates(data_frame[train_start:test_end].index, num_covariates)\n train_data = data_frame[train_start:train_end].values\n test_data = data_frame[test_start:test_end].values\n data_start = (train_data!=0).argmax(axis=0) #find first nonzero value in each time series\n total_time = data_frame.shape[0] #32304\n num_series = data_frame.shape[1] #370\n prep_data(train_data, covariates, data_start)\n prep_data(test_data, covariates, data_start, train=False)\n","sub_path":"DeepAR/preprocess_elect.py","file_name":"preprocess_elect.py","file_ext":"py","file_size_in_byte":7355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"78060611","text":"from numpy import*\nm=array(eval(input(\"Matriculas dos estudantes: \")))#Vetor com as matrículas\nq=0\nfor i in range(size(m)):\n\t\n\tif(m[i]%2==1):\n\t\tq+=1\nvetor=zeros(q, dtype=int) # Vetor vazio\nj=0\nfor i in range(size(m)):\n\n\tif(m[i]%2==1):\n\t\tvetor[j]=vetor[j]+m[i]\n\t\tj+=1\nprint(vetor)","sub_path":"5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4312/codes/1798_2572.py","file_name":"1798_2572.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"396101190","text":"class Solution:\r\n # @param {integer} x\r\n # @return {integer}\r\n def reverse(self, x):\r\n strx = str(x)\r\n if strx[0] == '-':\r\n result = int(strx[:0:-1])\r\n sign = -1\r\n else:\r\n result = int(strx[::-1])\r\n sign = 1\r\n if result > pow(2, 31):\r\n 
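            # overflow guard: the strict positive bound is 2**31 - 1, but no reversal of a valid 32-bit input can equal 2**31 exactly, so comparing against pow(2, 31) is safe here.\r\n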
return 0\r\n else:\r\n return sign*result\r\n \r\n \r\n \r\n\r\n\r\n\r\n \r\ndef check(inputs, truth):\r\n solver = Solution()\r\n\r\n ans = solver.reverse(inputs)\r\n if (ans != truth):\r\n print(ans, truth)\r\n else:\r\n print(\"pass\")\r\n \r\ncheck(123, 321)\r\ncheck(-123, -321)\r\ncheck(1534236469, 0)\r\ncheck(900000, 9)\r\ncheck(-2147483412, -2143847412)\r\ncheck(1463847412, 2147483641)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Mine/7_reverse integer.py","file_name":"7_reverse integer.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"135753161","text":"from storagewrapper import AuthenticateFunctions, BlobFunctions\nfrom behave import given, when, then\nimport os\nfrom unittest import TestCase as tc\n\n\n@given(\"parameters are set up for blob\")\ndef set_up_params(context):\n context.client_id = context.config.userdata.get(\"tenant_id\")\n context.vault_url = context.config.userdata.get(\"vault_url\")\n context.app_id = context.config.userdata.get(\"app_id\")\n context.app_key = context.config.userdata.get(\"app_key\")\n context.storage_account_name = context.config.userdata.get(\"storage_account_name\")\n context.blob_name = \"blob.txt\"\n context.params = {\"client_id\": context.client_id,\n \"app_id\": context.app_id,\n \"app_key\": context.app_key,\n \"vault_backed\": False}\n\n\n@given(\"credential is generated with {authentication_method} for blob\")\ndef generate_credential(context, authentication_method):\n\n context.params[\"authentication_method\"] = authentication_method\n context.authenticator = AuthenticateFunctions(context.params)\n\n\n assert context.authenticator is not None\n\n\n@given(\"BlobFunctions has been instantiated with all permissions\")\ndef instantiate_blob_functions(context):\n context.blob_functions = BlobFunctions(authenticator=context.authenticator, storage_account_name=context.storage_account_name)\n\n\n@when(\"a {container} is created\")\ndef create_test_container(context, container):\n creation_status = context.blob_functions.create_container(container_name=container)\n print(creation_status)\n assert creation_status is True\n\n\n@when(\"a upload to blob function is called to {container}\")\ndef upload_file_to_blob(context, container):\n path_to_file = f\"{os.getcwd()}/data/{context.blob_name}\"\n blob_client = context.blob_functions.upload_blob(blob_name=context.blob_name, data=path_to_file, container_name=container)\n assert blob_client is not None\n\n\n@then(\"all {container} in storage account are listed\")\ndef list_all_containers(context, container):\n list_of_containers = context.blob_functions.list_containers()\n containers_retrieved = []\n for blob_container in list_of_containers:\n containers_retrieved.append(blob_container.name)\n\n assert container in containers_retrieved\n\n\n@then(\"list blobs function is used in {container}\")\ndef use_list_blobs_function(context, container):\n list_blobs = context.blob_functions.list_blobs(container_name=container)\n assert context.blob_name in list_blobs\n\n\n@then(\"blob is deleted from {container}\")\ndef delete_newly_uploaded_blob(context, container):\n blob_deleted = context.blob_functions.delete_blob(context.blob_name, container_name=container)\n assert blob_deleted is True\n\n\n@then(\"blob {container} is deleted\")\ndef delete_new_container(context, container):\n container_deleted = context.blob_functions.delete_container(container_name=container)\n assert container_deleted is 
True\n","sub_path":"test/features/steps/blob_step_definitions.py","file_name":"blob_step_definitions.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"12969907","text":"\n# def solution(A):\n\n# if len(A) == 1:\n# return True\n\n# for i in range(0, len(A) - 1):\n# for j in range(i, len(A)):\n# A[i], A[j] = A[j], A[i]\n# if sorted(A) == A:\n# return True\n# else:\n# A[i], A[j] = A[j], A[i]\n# return False\n\n\ndef solution(A):\n if len(A) == 1:\n return True\n\n count = 0\n B = sorted(A)\n for i in range(len(B)):\n if A[i] != B[i]:\n count += 1\n\n return count <= 2\n\n\n# result = solution([1, 5, 3, 3, 7])\n# print(result)\n","sub_path":"codility_real_2.py","file_name":"codility_real_2.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"244925948","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 9 18:05:26 2020\n\n@author: pnter\nes\"\"\"\nimport time\nimport SplittingLocalGP\nimport torch\nimport gpytorch\nimport numpy as np\nimport TestData\nimport pandas as pd\n\ndef getKin40():\n predictorsTrain,responseTrain,predictorsTest,responseTest = TestData.kin40()\n return predictorsTrain.double(),responseTrain.double(),predictorsTest.double(),responseTest.double()\n\ndef getPowergen(seed):\n predictorsTrain,responseTrain,predictorsTest,responseTest = TestData.powergen(seed)\n return predictorsTrain.double(),responseTrain.double(),predictorsTest.double(),responseTest.double()\n\ndef makeModel(kernelClass,likelihood,M,splittingLimit,inheritLikelihood):\n\n model = SplittingLocalGP.SplittingLocalGPModel(likelihood,gpytorch.kernels.ScaleKernel(kernelClass(ard_num_dims=4)),\n splittingLimit=splittingLimit,inheritKernel=True,\n inheritLikelihood=inheritLikelihood,\n M=M,\n mean=gpytorch.means.ConstantMean)\n \n return model\n\ndef evalModel(M=None,splittingLimit=500,inheritLikelihood=True,mtype='splitting'):\n #Set RNG seed\n torch.manual_seed(42069)\n \n kernel = gpytorch.kernels.RBFKernel\n\n likelihood = gpytorch.likelihoods.GaussianLikelihood\n \n model = makeModel(kernel,likelihood,M,splittingLimit,inheritLikelihood)\n \n j = 0\n for index in range(int(predictorsTrain.shape[0]))[::splittingLimit]:\n #We don't want to overshoot the number of obs...\n upperIndex = min(index+splittingLimit,int(predictorsTrain.shape[0]))\n x_train = predictorsTrain[index:upperIndex]\n y_train = responseTrain[index:upperIndex]\n \n #Need to unsqueeze if the data is 1d\n if x_train.dim()==1:\n x_train = x_train.unsqueeze(1)\n y_train = y_train\n \n model.update(x_train,y_train)\n \n #This print statement may be uncommented if you're nosy and want to track the progress\n #print('Update {0}'.format(j))\n j += 1\n\n print('Done training')\n \n return model\n\n#Define the parameters we will use for each model tested.\nparamsList = [{'M':None,'splittingLimit':625,'inheritLikelihood':False,'mtype':'splitting'},\n {'M':None,'splittingLimit':300,'inheritLikelihood':False,'mtype':'splitting'},\n {'M':None,'splittingLimit':150,'inheritLikelihood':False,'mtype':'splitting'},\n {'M':None,'splittingLimit':75,'inheritLikelihood':False,'mtype':'splitting'},\n {'M':None,'splittingLimit':30,'inheritLikelihood':False,'mtype':'splitting'}]\n\n#Number of replications to perform. 
Only necessary to do >1 if randomized\nnumReps = 10\n\n#Fix the random seeds for using CRN with rBCM experiment\ntorch.manual_seed(10101)\nnp.random.seed(10101)\nseeds = [41065,10342,98891,36783,11102,34522,98991,98990,76766,27726]\n\n#Create arrays to store experimental results\nmadArr = torch.zeros((len(paramsList)*numReps,1))\nresultsArr = torch.zeros((len(paramsList)*numReps,1))\ntimeArr = torch.zeros((len(paramsList)*numReps,1))\nrepArr = torch.arange(numReps).repeat_interleave(len(paramsList))\n\n#Raise this setting to prevent potential numerical issues if CG hasn't converged within the default iteration limit\nwith gpytorch.settings.max_cg_iterations(20000):\n    for i in range(len(paramsList)):\n        for rep in range(numReps):\n            \n            #Take the seed for the current rep, and retrieve the powergen dataset. Seed is used to create random 80/20 train/test split\n            seed = seeds[rep]\n            predictorsTrain,responseTrain,predictorsTest,responseTest = getPowergen(seed)\n            \n            #Record time for benchmarking\n            t0 = time.time()\n\n            #Construct and fit model\n            model = evalModel(**paramsList[i])\n            \n            #Create predictions using the test data\n            preds = model.predict(predictorsTest)\n\n            #Record the stopping time\n            t1 = time.time()\n            \n            #Compute root mean squared error, mean absolute deviation\n            rmse = torch.sqrt(torch.mean((preds-responseTest)**2))\n            mad = torch.mean(torch.abs(preds-responseTest))\n            \n            resultsArr[i+rep*len(paramsList)] = rmse\n            timeArr[i+rep*len(paramsList)] = t1-t0\n            madArr[i+rep*len(paramsList)] = mad\n\n#Create dataframe for storing experiment results\ndf = pd.DataFrame()\n\nconcatParams = []\nfor rep in range(numReps):\n    concatParams += paramsList\n\ndf['params'] = concatParams\ndf['time'] = timeArr.detach().numpy()\ndf['rmse'] = resultsArr.detach().numpy()\ndf['mad'] = madArr.detach().numpy()\ndf['replication'] = repArr.detach().numpy()\n\n#Write results to CSV\ndf.to_csv('powergen_results_splitting_10reps.csv')","sub_path":"Experiments/powergen_experiment_splitting.py","file_name":"powergen_experiment_splitting.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"488700886","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\nfrom django.views.generic import View\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth import authenticate\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.template import loader\nfrom users.models import *\nimport json\n# Create your views here.\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass Login(APIView):\n    def post(self, request, *args, **kwargs):\n        username=request.data.get('username')\n        password=request.data.get('password')\n        user=authenticate(username=username, password=password)\n        print(username, password)\n        if user is not None:\n            try:\n                print(\"Success\")\n                #employee = Users.objects.get(username=username)\n                return JsonResponse({'status': 200, 'user':username}, status=200)\n            except:\n                return JsonResponse({'status': 403}, status=403)\n        else:\n            return JsonResponse({'status': 403}, status=403)\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass Registration(View):\n    def get(self, request, *args, **kwargs):\n        data_obj=Customer.objects.all()\n        data_list=[]\n        for i in range(len(data_obj)):\n            data={\n                \"first_name\":data_obj[i].first_name,\n                \"middle_name\":data_obj[i].middle_name,\n                \"last_name\":data_obj[i].last_name,\n                \"mobilenumber\":data_obj[i].mobilenumber,\n                \"email\":data_obj[i].email,\n                #\"caste\":data_obj[i].caste,\n            }\n            data_list.append(data)\n\n        dump = json.dumps(data_list)\n        print(data_obj)\n        return HttpResponse(dump, content_type='application/json')\n    def post(self, request, *args, **kwargs):\n        print(\"POST\")\n        first_name=request.POST.get('firstName')\n        middle_name=request.POST.get('middleName')\n        last_name=request.POST.get('lastName')\n        mobile_number=request.POST.get('mobileNumber')\n        email=request.POST.get('email')\n        print(first_name, last_name, middle_name, mobile_number, email)\n        reg_obj=Customer()\n        reg_obj.first_name=first_name\n        reg_obj.middle_name=middle_name\n        reg_obj.last_name=last_name\n        reg_obj.mobile_number=mobile_number\n        reg_obj.email=email\n        reg_obj.save()\n\n        return HttpResponse(\"Registration Success\")\n\n","sub_path":"instagram/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"448087864","text":"import sys\nsys.path.append(\".\")\nfrom multiprocessing import cpu_count\n\nips = [line.rstrip() for line in open('hosts')]\n\nfor host in ips:\n    print(host)\n\nstring = \"\"\n\nfor i in range(0, 1200):\n    string += \"t\"\n\ntest = str.encode(string, 'utf-8')\n\nprint(test.__sizeof__(), str.encode(str(cpu_count()), 'utf-8'))\n\n\n\nstring = \"doies test\"\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"122978948","text":"import os\nfrom apiclient.discovery import build\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\nfrom email.mime.image import MIMEImage\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.base import MIMEBase\nfrom google.auth.transport.requests import Request as gRequest\nimport base64\nimport pickle\nimport mimetypes\n\n\nGMAIL_PICKLE = \"/web/usap-dc/htdocs/inc/token.pickle\"\n\n\ndef connect_to_gmail():\n    creds = None\n    \n    if os.path.exists(GMAIL_PICKLE):\n        with open(GMAIL_PICKLE, 'rb') as token:\n            creds = pickle.load(token)\n    else:\n        # if the pickle doesn't exist, need to run the bin/gmail_quickstart.py on local system to\n        # log in and create token.pickle.
Then copy it to inc/token.pickle\n return None, \"Unable to authorise connection to account\"\n\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(gRequest())\n else:\n return None, \"Gmail credentials are not valid\"\n\n service = build('gmail', 'v1', credentials=creds)\n return service, None\n\n\ndef create_gmail_message(sender, recipients, subject, message_text, file=None):\n \"\"\"Create a message for an email.\n\n Args:\n sender: Email address of the sender.\n to: Email address of the receiver.\n subject: The subject of the email message.\n message_text: The text of the email message.\n file: Path to file to be sent as attachment\n\n Returns:\n An object containing a base64url encoded email object.\n \"\"\"\n message = MIMEMultipart('mixed')\n message['To'] = ', '.join(recipients)\n message['From'] = sender\n message['Subject'] = subject\n content = MIMEText(message_text, 'html', 'utf-8')\n message.attach(content)\n\n if file:\n content_type, encoding = mimetypes.guess_type(file)\n if content_type is None or encoding is not None:\n content_type = 'application/octet-stream'\n main_type, sub_type = content_type.split('/', 1)\n if main_type == 'text':\n fp = open(file, 'rb')\n msg = MIMEText(fp.read().decode(), _subtype=sub_type)\n fp.close()\n elif main_type == 'image':\n fp = open(file, 'rb')\n msg = MIMEImage(fp.read(), _subtype=sub_type)\n fp.close()\n elif main_type == 'audio':\n fp = open(file, 'rb')\n msg = MIMEAudio(fp.read(), _subtype=sub_type)\n fp.close()\n else:\n fp = open(file, 'rb')\n msg = MIMEBase(main_type, sub_type)\n msg.set_payload(fp.read())\n fp.close()\n filename = os.path.basename(file)\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\n message.attach(msg)\n try:\n raw = base64.urlsafe_b64encode(message.as_bytes()).decode()\n except Exception as e:\n raw = base64.urlsafe_b64encode(message.as_bytes())\n\n return {'raw': raw}\n\n\ndef send(service, user_id, message):\n \"\"\"Send an email message.\n\n Args:\n service: Authorized Gmail API service instance.\n user_id: User's email address. 
The special value \"me\"\n can be used to indicate the authenticated user.\n message: Message to be sent.\n\n Returns:\n success and error messages.\n \"\"\"\n success = None\n error = None\n try:\n message = (service.users().messages().send(userId=user_id, body=message)\n .execute())\n # print('Message Id: %s' % message['id'])\n success = \"Email sent\"\n return success, error\n except Exception as error:\n print('An error occurred: %s' % error)\n err = \"Error sending email: \" + str(error)\n return success, err\n\n\ndef send_gmail_message(sender, recipients, subject, message_text, file):\n success = None\n error = None\n\n msg_raw = create_gmail_message(sender, recipients, subject, message_text, file)\n\n service, error = connect_to_gmail()\n if error:\n err = 'ERROR connecting to gmail' + str(error)\n return success, err\n else:\n return send(service, 'me', msg_raw)","sub_path":"bin/gmail_functions.py","file_name":"gmail_functions.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"643100929","text":"import torch\nfrom torch import optim, nn\nimport visdom\nimport torchvision\nfrom torch.utils.data import DataLoader\n\nfrom pokemon import Pokemon\nfrom torchvision.models import resnet18\n\nfrom utils import Flatten\n\nbatchsz = 32\nlr = 1e-3\nepochs = 10\n\ndevice = torch.device('cuda')\ntorch.manual_seed(1234)\n\nroot = '../../datasets/pokemon'\n\ntrain_db = Pokemon(root, 224, mode='train')\nval_db = Pokemon(root, 224, mode='val')\ntest_db = Pokemon(root, 224, mode='test')\ntrain_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True,\n num_workers=4)\nval_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)\ntest_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)\n\n\nviz = visdom.Visdom()\n\ndef evalute(model, loader):\n model.eval()\n \n correct = 0\n total = len(loader.dataset)\n\n for x,y in loader:\n x,y = x.to(device), y.to(device)\n with torch.no_grad():\n logits = model(x)\n pred = logits.argmax(dim=1)\n correct += torch.eq(pred, y).sum().float().item()\n\n return correct / total\n\ndef main():\n\n # model = ResNet18(5).to(device)\n trained_model = resnet18(pretrained=True)\n model = nn.Sequential(*list(trained_model.children())[:-1], #[b, 512, 1, 1]\n Flatten(), # [b, 512, 1, 1] => [b, 512]\n nn.Linear(512, 5)\n ).to(device)\n # x = torch.randn(2, 3, 224, 224)\n # print(model(x).shape)\n\n optimizer = optim.Adam(model.parameters(), lr=lr)\n criteon = nn.CrossEntropyLoss()\n\n\n best_acc, best_epoch = 0, 0\n global_step = 0\n viz.line([0], [-1], win='loss', opts=dict(title='loss'))\n viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))\n for epoch in range(epochs):\n\n for step, (x,y) in enumerate(train_loader):\n\n # x: [b, 3, 224, 224], y: [b]\n x, y = x.to(device), y.to(device)\n\n model.train()\n logits = model(x)\n loss = criteon(logits, y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n viz.line([loss.item()], [global_step], win='loss', update='append')\n global_step += 1\n\n if epoch % 1 == 0:\n\n val_acc = evalute(model, val_loader)\n if val_acc> best_acc:\n best_epoch = epoch\n best_acc = val_acc\n\n torch.save(model.state_dict(), 'train_transfer.mdl')\n\n viz.line([val_acc], [global_step], win='val_acc', update='append')\n\n print('best acc:', best_acc, 'best epoch:', best_epoch)\n\n model.load_state_dict(torch.load('train_transfer.mdl'))\n print('loaded from ckpt!')\n\n test_acc = evalute(model, test_loader)\n 
print('test acc:', test_acc)\n\n\nif __name__ == '__main__':\n    main()\n\n\"\"\"\nTest results:\n    best acc: 0.9570815450643777 best epoch: 6\n    test acc: 0.9401709401709402\n\"\"\"","sub_path":"Pytorch/Pytorch_learn_by_dragen1860/slide/lesson50-迁移学习-自定义数据集-Pokemon/train_transfer.py","file_name":"train_transfer.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"626472392","text":"from enum import Enum\nfrom datetime import date\nfrom vnpy.trader.object import BarData, TickData, OrderData, TradeData, ContractData\nfrom vnpy.trader.constant import (Direction, Offset, Exchange,\n                                 Interval, Status)\nfrom typing import Dict, List\n\nfrom vnpy.app.cta_strategy.backtesting import DailyResult\n\n\nclass WhBarTriggerMode(Enum):\n\n    SINGLE = 'single' # trigger once per symbol\n    ALL = 'all' # trigger once after all symbols are collected\n\n\nclass MultiDailyResult:\n    \"\"\"\"\"\"\n\n    def __init__(self, date: date, close_prices: Dict[str,float], contracts: Dict[str, ContractData]):\n        \"\"\"\"\"\"\n        self.date = date\n        self.close_prices = close_prices\n        self.pre_close = 0\n\n        self.trades: List[TradeData] = []\n        self.trade_count = 0\n\n        self.start_poses = 0\n        self.end_poses = 0\n\n        self.turnover = 0\n        self.commission = 0\n        self.slippage = 0\n\n        self.trading_pnl = 0\n        self.holding_pnl = 0\n        self.total_pnl = 0\n        self.net_pnl = 0\n\n        self.contracts = contracts\n\n        self.daily_results: Dict[str,DailyResult] = {vt_symbol: DailyResult(\n            self.date, close_prices[vt_symbol]) for vt_symbol in contracts.keys()}\n\n    def add_trade(self, trade: TradeData):\n        \"\"\"\"\"\"\n        self.trades.append(trade)\n\n    def calculate_pnl(\n        self,\n        pre_closes: Dict[str,float],\n        start_poses: Dict[str,float],\n        slippage: float,\n    ):\n        \"\"\"\"\"\"\n        # If no pre_close provided on the first day,\n        # use value 1 to avoid zero division error\n\n        self.start_poses = start_poses\n        self.end_poses = start_poses\n        self.slippage = slippage\n\n        for trade in self.trades:\n            self.daily_results[trade.vt_symbol].add_trade(trade)\n\n        for vt_symbol, result in self.daily_results.items():\n            result.calculate_pnl(\n                pre_close=pre_closes[vt_symbol], \n                start_pos=start_poses[vt_symbol],\n                size=self.contracts[vt_symbol].size, \n                rate=self.contracts[vt_symbol].rate, \n                slippage=self.slippage, \n                inverse=self.contracts[vt_symbol].inverse\n            )\n            self.trading_pnl = self.trading_pnl + result.trading_pnl\n            self.holding_pnl = self.holding_pnl + result.holding_pnl\n            self.total_pnl = self.total_pnl + result.total_pnl\n            self.net_pnl = self.net_pnl + result.net_pnl\n            self.turnover = self.turnover + result.turnover\n            self.commission = self.commission + result.commission\n\n            self.end_poses[vt_symbol] = result.end_pos","sub_path":"vnpy/app/wh_strategy/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"318649319","text":"from tkinter import *\nimport time\ntk=Tk()\nc=Canvas(width=500,height=500)\nc.pack()\nc.create_polygon(10,10,10,60,40,40)\nfor i in range(60):\n    c.move(1,6,0)\n    tk.update()\n    time.sleep(0.05)\n","sub_path":"tk_t.py","file_name":"tk_t.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"284979578","text":"# Copyright (c) Facebook, Inc.
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport abc\nimport ast\nfrom pathlib import Path\nfrom pickle import PicklingError\nfrom typing import Collection, Optional, Sequence, Union\n\nimport libcst as cst\n\nfrom fixit.common.autofix import LintPatch\n\n\nclass BaseLintRuleReport(abc.ABC):\n \"\"\"\n Represents a lint violation. This is generated by calling `self.context.report`\n in your lint rule, and is saved to the context's `reports` list.\n \"\"\"\n\n file_path: Path\n code: str\n message: str\n # This is the line/column where the lint rule reported the violation. `arc lint` may\n # report a different line/column when a patch is applied because it requires that\n # the start of the patch is the same as the reported line/column.\n line: int\n column: int\n\n def __init__(\n self, *, file_path: Path, code: str, message: str, line: int, column: int\n ) -> None:\n self.file_path = file_path\n self.code = code\n self.message = message\n self.line = line\n self.column = column\n\n @property\n def patch(self) -> Optional[LintPatch]:\n return None\n\n def __repr__(self) -> str:\n return f\"{self.line}:{self.column}: {self.code} {self.message}\"\n\n def __reduce__(self) -> None:\n raise PicklingError(\n \"Lint rule reports are potentially very complex objects. They can contain \"\n + \"a syntax tree or an entire module's source code. They should not be \"\n + \"pickled (or returned by a multiprocessing worker). Instead, extract \"\n + \"the fields you care about, and pickle those.\"\n )\n\n\nclass AstLintRuleReport(BaseLintRuleReport):\n def __init__(\n self,\n *,\n file_path: Path,\n node: ast.AST,\n code: str,\n message: str,\n line: int,\n column: int,\n ) -> None:\n super().__init__(\n file_path=file_path, code=code, message=message, line=line, column=column\n )\n self.node = node\n\n\nclass CstLintRuleReport(BaseLintRuleReport):\n def __init__(\n self,\n *,\n file_path: Path,\n node: cst.CSTNode,\n code: str,\n message: str,\n line: int,\n column: int,\n module: cst.MetadataWrapper,\n module_bytes: bytes,\n replacement_node: Optional[Union[cst.CSTNode, cst.RemovalSentinel]] = None,\n ) -> None:\n super().__init__(\n file_path=file_path, code=code, message=message, line=line, column=column\n )\n self.node = node\n self.module = module\n self.module_bytes = module_bytes\n self.replacement_node = replacement_node\n self._cached_patch: Optional[LintPatch] = None\n\n # Ideally this would use functools.cached_property, but that's only in py3.8+.\n @property\n def patch(self) -> Optional[LintPatch]:\n \"\"\"\n Computes and returns a `LintPatch` object.\n \"\"\"\n replacement_node = self.replacement_node\n if replacement_node is None:\n return None\n cached = self._cached_patch\n if cached is None:\n cached = LintPatch.get(\n wrapper=self.module,\n original_node=self.node,\n replacement_node=replacement_node,\n ).minimize()\n self._cached_patch = cached\n return cached\n\n\nclass LintFailureReportBase(abc.ABC):\n \"\"\"An implementation needs to be a dataclass.\"\"\"\n\n @staticmethod\n @abc.abstractmethod\n def create_reports(\n path: Path, exception_traceback: str, **kwargs: object\n ) -> Sequence[\"LintFailureReportBase\"]:\n ...\n\n\nclass LintSuccessReportBase(abc.ABC):\n \"\"\"An implementation needs to be a dataclass.\"\"\"\n\n @staticmethod\n @abc.abstractmethod\n def create_reports(\n path: Path, reports: Collection[BaseLintRuleReport], **kwargs: object\n ) -> 
Sequence[\"LintSuccessReportBase\"]:\n        ...\n","sub_path":"fixit/common/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"204004888","text":"from collections import deque\n\ndef solve(quantity):\n    orders = [int(el) for el in input().split()]\n    orders = deque(orders)\n    is_p = True\n    while orders:\n        if is_p:\n            print(max(orders))\n            is_p = False\n\n        if quantity >= (orders[0]):\n            quantity -= (orders.popleft())\n\n        else:\n            print(f'Orders left: {\" \".join(map(str,orders))}')\n            exit(0)\n    print('Orders complete')\n\n\nsolve(int(input()))\n\n","sub_path":"python-dev-advanced/list_as_stacks_and_queues_ex/03_fast_food.py","file_name":"03_fast_food.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"387883111","text":"import bisect\n\n\nclass Solution(object):\n    def lengthOfLIS(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        if not nums:\n            return 0\n        \n        mem = [nums[0]]\n        \n        for i in range(1, len(nums)):\n            if (mem[-1] < nums[i]):\n                mem.append(nums[i])\n            else:\n                pos = bisect.bisect_left(mem, nums[i])\n                mem[pos] = nums[i]\n        \n        return len(mem)\n    ","sub_path":"300. Longest Increasing Subsequence/300_nlogn.py","file_name":"300_nlogn.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"325119811","text":"# coding: utf-8\n\n__author__ = 'secondwtq'\n\nimport tornado.escape\nimport tornado.gen\nimport json\nimport collections\nimport tornado.concurrent\n\nimport cbr_authentication\nfrom cbr_common import *\nfrom cbr_db import rdb\nimport cbr_utils\n\n\nclass ChatterError(Exception):\n\tpass\n\n\nclass ChatterNodeNotFoundException(ChatterError):\n\tpass\n\n\nclass ChatterMessageNotFoundException(ChatterError):\n\tpass\n\n\nclass ChatterIDTypeException(ChatterError):\n\tpass\n\n\nclass ChatterRedis(object):\n\n\t@staticmethod\n\tdef init():\n\t\trdb().set('g:chatter_id_max', 0)\n\n\t@staticmethod\n\tdef newid():\n\t\tret = int(rdb().get('g:chatter_id_max'))\n\t\trdb().incr('g:chatter_id_max')\n\t\treturn ret\n\n\t@staticmethod\n\tdef id_is_msg(src):\n\t\t# TODO: smarter approach to validate node\n\t\treturn not rdb().hexists('chatter:%d' % int(src), 'creator_id')\n\n\nclass ChatterRedisNode(object):\n\n\tdef __init__(self, node_id, strict=True):\n\t\tnode_id = int(node_id)\n\t\tif (not strict) or ChatterRedisNode.has_node_id(node_id):\n\t\t\tself.nid = node_id\n\t\telse:\n\t\t\traise ChatterNodeNotFoundException(node_id)\n\n\tdef name(self):\n\t\treturn rdb().hget('chatter:%d' % self.nid, 'name')\n\n\tdef remove(self):\n\t\trdb().srem('g:chatter_node_set', self.nid)\n\t\trdb().hdel('g:chatter_node_dict', ChatterNode.node_name_from_id(self.nid))\n\t\trdb().lrem('g:chatter_nodes', 1, self.nid)\n\n\tdef summary(self):\n\n\t\tret = rdb().hgetall('chatter:%d' % self.nid)\n\n\t\tret['id'] = int(ret['id'])\n\t\tret['creator_id'] = int(ret['creator_id'])\n\t\tret['creation_time'] = int(ret['creation_time'])\n\n\t\tret['creator_name'] = cbr_authentication.Cnesoc().user_name(ret['creator_id'], False)\n\n\t\treturn ret\n\n\tdef summaryd(self, start=0, end=-1):\n\n\t\tret = self.summary()\n\t\tret['children_id'] = list(self.children_id(start, end))\n\t\treturn ret\n\n\tdef children_id(self, start=0, end=-1):\n\t\treturn (int(i) for i in rdb().lrange('chatter:%d:children' % self.nid, start,
end))\n\n\tdef children(self, start=0, end=-1):\n\t\treturn (ChatterRedisMessage(i) for i in self.children_id(start, end))\n\n\t@staticmethod\n\tdef has_node_id(node_id):\n\t\treturn rdb().sismember('g:chatter_node_set', int(node_id))\n\n\t@staticmethod\n\tdef node_name_from_id(node_id):\n\t\treturn str(rdb().hget('chatter:%d' % int(node_id), 'name'))\n\n\t@staticmethod\n\tdef node_ids():\n\t\treturn [int(i) for i in rdb().lrange('g:chatter_nodes', 0, -1)]\n\n\nclass ChatterRedisMessage(object):\n\n\tdef __init__(self, msg_id, strict=True):\n\t\tmsg_id = int(msg_id)\n\t\tif not strict or ChatterRedisMessage.has_msg_id(msg_id):\n\t\t\tif strict and not Chatter.id_is_msg(msg_id):\n\t\t\t\traise ChatterIDTypeException(msg_id)\n\t\t\tself.mid = msg_id\n\t\telse:\n\t\t\traise ChatterMessageNotFoundException(msg_id)\n\n\tdef children_id(self, start=0, end=-1):\n\t\treturn (int(i) for i in rdb().lrange('chatter:%d:children' % self.mid, start, end))\n\n\tdef children(self, start=0, end=-1):\n\t\treturn (ChatterRedisMessage(i) for i in self.children_id(start, end))\n\n\tdef summary(self):\n\t\tret_d = rdb().hgetall('chatter:%d' % self.mid)\n\n\t\treturn {\n\t\t\t'id': int(ret_d['id']),\n\t\t\t'title': ret_d['title'],\n\t\t\t'content': ret_d['content'],\n\t\t\t'date': int(ret_d['date']),\n\t\t\t'parent_id': int(ret_d['parent_id']),\n\t\t\t'author_id': int(ret_d['author_id']),\n\t\t\t'author_name': str(cbr_authentication.Cnesoc().user_name(int(ret_d['author_id']), False)),\n\t\t\t'node_id': int(ret_d['node_id']),\n\t\t\t'node_name': ChatterNode(int(ret_d['node_id']), False).name()\n\t\t}\n\n\tdef summaryd(self, start=0, end=-1):\n\n\t\tret = self.summary()\n\t\tret['children_id'] = list(self.children_id(start, end))\n\t\treturn ret\n\n\tdef parent_id(self):\n\t\treturn int(rdb().hget('chatter:%d' % self.mid, 'parent_id'))\n\n\tdef parent(self):\n\t\tpid = self.parent_id()\n\t\tif Chatter.id_is_msg(pid):\n\t\t\treturn ChatterMessage(pid)\n\t\telse:\n\t\t\treturn ChatterNode(pid)\n\n\t@staticmethod\n\tdef has_msg_id(msg_id):\n\t\treturn rdb().exists('chatter:%d' % int(msg_id))\n\n\nclass ChatterMessageBuffer(object):\n\n\tdef __init__(self):\n\t\tself.future_buffer = collections.defaultdict(set)\n\n\tdef register_on(self, watch_id, cursor=-1):\n\t\tret = tornado.concurrent.Future()\n\n\t\tself.future_buffer[watch_id].add(ret)\n\t\treturn ret\n\n\tdef cancel_wait(self, watch_id, future):\n\t\tprint('canceling wait for %d ...' 
% watch_id)\n\t\tself.future_buffer[watch_id].remove(future)\n\t\t# future.set_result(None)\n\n\tdef notify(self, msg_id):\n\n\t\tdef notify_helper(msg_id_i, org_msg):\n\t\t\tret = ChatterMessage(org_msg, False)\n\t\t\tfor f in self.future_buffer[msg_id_i]:\n\t\t\t\tf.set_result([ret])\n\t\t\tif Chatter.id_is_msg(msg_id_i):\n\t\t\t\tnotify_helper(ChatterMessage(msg_id_i, False).parent_id(), org_msg)\n\n\t\tnotify_helper(msg_id, msg_id)\n\n\nChatter = ChatterRedis\nChatterNode = ChatterRedisNode\nChatterMessage = ChatterRedisMessage\n\nglobal_buffer = ChatterMessageBuffer()\n\n\n# /chatter/node/%d/fetch\n# GET\n#\nclass ChatterFetchHandler(cbr_authentication.AuthedHandlerBase):\n\n\tdef __init__(self, application, request, **kwargs):\n\n\t\t# to suppress warnings ..\n\t\tself.watch_id = None\n\t\tself.future = None\n\t\tself.closed = True\n\n\t\tsuper(ChatterFetchHandler, self).__init__(application, request, **kwargs)\n\n\t@tornado.gen.coroutine\n\tdef get(self, node_id):\n\t\tnode_id = int(node_id)\n\n\t\tif not ChatterNode.has_node_id(node_id):\n\t\t\traise HTTPNotFoundError()\n\n\t\tself.watch_id = int(self.get_argument('watchon', default=str(node_id)))\n\t\tself.future = global_buffer.register_on(self.watch_id)\n\t\tself.closed = False\n\t\tret = yield self.future\n\t\tif self.request.connection.stream.closed():\n\t\t\treturn\n\t\tself.write(json.dumps([msg.summary() for msg in ret]))\n\t\tglobal_buffer.cancel_wait(self.watch_id, self.future)\n\t\tself.closed = True\n\n\tdef on_connection_close(self):\n\t\tprint('closing connection for waiting %d ...' % self.watch_id)\n\t\tif not self.closed:\n\t\t\tglobal_buffer.cancel_wait(self.watch_id, self.future)\n\n\ndef re_summary(h):\n\treturn {\n\t\t'id': int(h['id']),\n\t\t'title': h['title'],\n\t\t'content': h['content'],\n\t\t'date': int(h['date']),\n\t\t'parent_id': int(h['parent_id']),\n\t\t'author_id': int(h['author_id']),\n\t\t'author_name': str(cbr_authentication.Cnesoc().user_name(int(h['author_id']), False)),\n\t\t'node_id': int(h['node_id']),\n\t}\n\n\n# /chatter/node/%d/chat\n# GET/POST/DELETE\n#\nclass ChatterPostHandler(cbr_authentication.AuthedHandlerBase):\n\tdef get(self, node_id):\n\t\tnode_id = int(node_id)\n\n\t\tif not ChatterNode.has_node_id(node_id):\n\t\t\traise HTTPNotFoundError()\n\n\t\tif self.get_argument('id', default='-1') == '-1':\n\t\t\tnode = ChatterNode(node_id, False)\n\t\t\tret = node.summary()\n\t\t\tret['children_id'] = list(node.children_id())\n\t\t\t# ret['children'] = [child.summary() for child in node.children()]\n\n\t\t\t# stackoverflow.com/questions/14713084/how-to-see-set-get-in-redis-log\n\t\t\t# also very useful when tweaking performance\n\t\t\tpipelined = rdb().pipeline(transaction=False)\n\t\t\tfor child_id in ret['children_id']:\n\t\t\t\tpipelined.hgetall('chatter:%d' % child_id)\n\t\t\tret['children'] = [re_summary(h) for h in pipelined.execute()]\n\n\t\t\tself.write(json.dumps(ret))\n\n\t\t\treturn\n\t\telse:\n\t\t\tmsg_id = int(self.get_argument('id', default='-1'))\n\t\t\ttry:\n\t\t\t\tmsg = ChatterMessage(msg_id)\n\t\t\t\tret = msg.summary()\n\t\t\t\tret['children_id'] = list(msg.children_id())\n\n\t\t\t\tpipelined = rdb().pipeline(transaction=False)\n\t\t\t\tfor child_id in ret['children_id']:\n\t\t\t\t\tpipelined.hgetall('chatter:%d' % child_id)\n\t\t\t\tret['children'] = [re_summary(h) for h in pipelined.execute()]\n\n\t\t\t\tself.write(json.dumps(ret))\n\t\t\texcept ChatterMessageNotFoundException:\n\t\t\t\traise HTTPNotFoundError()\n\n\tdef delete(self, node_id):\n\t\tnode_id = 
int(node_id)\n\n\t\tif not ChatterNode.has_node_id(node_id):\n\t\t\traise HTTPNotFoundError()\n\n\t\tif self.get_argument('id', default=None) is None:\n\t\t\traise HTTPBadRequestError()\n\n\t\tmsg_id = int(self.get_argument('id', default=None))\n\t\trdb().lrem('chatter:%d:children' % ChatterMessage(msg_id).parent_id(), 1, msg_id)\n\n\tdef post(self, node_id):\n\t\tuser_id = self.get_current_user()\n\t\tif user_id is None:\n\t\t\traise HTTPUnauthorizedError()\n\n\t\tnode_id = int(node_id)\n\t\tdata = tornado.escape.json_decode(self.request.body)\n\t\tif 'parent_id' not in data:\n\t\t\tdata['parent_id'] = node_id\n\n\t\tif len(data['title']) < 3:\n\t\t\traise HTTPBadRequestError()\n\n\t\tpost_id = Chatter.newid()\n\t\tpost_obj = {\n\t\t\t'id': post_id,\n\t\t\t'author_id': user_id,\n\t\t\t'title': data['title'],\n\t\t\t'content': data['content'],\n\t\t\t'parent_id': int(data['parent_id']),\n\t\t\t'node_id': node_id,\n\t\t\t'date': cbr_utils.current_time()\n\t\t}\n\n\t\trdb().hmset('chatter:%d' % post_id, post_obj)\n\t\trdb().lpush('chatter:%d:children' % int(data['parent_id']), post_id)\n\n\t\tglobal_buffer.notify(post_id)\n\n\n# /chatter/node/%d\n# GET/DELETE (POST)\n#\nclass ChatterNodeHandler(cbr_authentication.AuthedHandlerBase):\n\tdef get(self, node_id):\n\t\tnode_id = int(node_id)\n\t\ttry:\n\t\t\tself.write(json.dumps(ChatterNode(node_id).summary()))\n\t\texcept ChatterNodeNotFoundException:\n\t\t\traise HTTPNotFoundError()\n\n\tdef delete(self, node_id):\n\t\tif not self.get_current_user():\n\t\t\traise HTTPUnauthorizedError()\n\n\t\tnode_id = int(node_id)\n\n\t\tif not ChatterNode.has_node_id(node_id):\n\t\t\traise HTTPNotFoundError()\n\n\t\tChatterNode(node_id).remove()\n\n\tdef post(self, node_id):\n\t\tpass\n\n\n# /chatter/node\n# GET/POST\n#\nclass ChatterNodeRootHandler(cbr_authentication.AuthedHandlerBase):\n\n\tdef get(self):\n\t\tself.write(json.dumps([ChatterNode(i).summary() for i in ChatterNode.node_ids()]))\n\n\tdef post(self):\n\t\tif not self.get_current_user():\n\t\t\traise HTTPUnauthorizedError()\n\n\t\tnode_id = Chatter.newid()\n\t\tdata = tornado.escape.json_decode(self.request.body)\n\n\t\tnode_obj = {\n\t\t\t'id': int(node_id),\n\t\t\t'name': str(data['name']),\n\t\t\t'creator_id': int(self.get_current_user()),\n\t\t\t'creation_time': int(cbr_utils.current_time())\n\t\t}\n\n\t\trdb().hmset('chatter:%d' % node_id, node_obj)\n\t\trdb().lpush('g:chatter_nodes', node_id)\n\t\trdb().sadd('g:chatter_node_set', node_id)\n\t\trdb().hset('g:chatter_node_dict', str(data['name']), node_id)\n\n\t\tself.write(json.dumps(ChatterNode(node_id).summary()))\n","sub_path":"cbr_chatter.py","file_name":"cbr_chatter.py","file_ext":"py","file_size_in_byte":9741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"72348368","text":"baseStat = 39 #input the base value of your weapon here\nmaxUpgrades = 60 #input your max upgrade level here\nupgradeLevel = 1 #don't touch this line\ncounter = 0\ni = 1\nupgrade = 1\ntimesIncreasedByTen = 0\n\nfor i in range(maxUpgrades):\n upgradeLevel = baseStat * 0.05\n counter = counter + 1\n upgrade = counter\n if upgradeLevel >= 10:\n timesIncreasedByTen += 1\n upgradeLevel = 10\n else:\n print(f'Item stats increased by {round(upgradeLevel)} at level {upgrade}! ({round(baseStat)} total)')\n baseStat = baseStat + upgradeLevel\nif timesIncreasedByTen:\n print(f'Item stats reached max increment (10); increased by 10 {timesIncreasedByTen} times. 
({upgrade} times in total)')\nprint(f'Your item\\'s max potential is {round(baseStat)}.')\n","sub_path":"StatCalculator.py","file_name":"StatCalculator.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"44071200","text":"from bs4 import BeautifulSoup as bs\nfrom selenium import webdriver\nfrom selenium.webdriver import DesiredCapabilities\n\nimport MySQLdb\nimport requests\nimport re\nimport os\nimport glob\nimport json\n\n\ndef _insert_data(data):\n BASE_URL = os.environ.get('BASE_URL')\n con = MySQLdb.connect(user='root',\n passwd=os.environ.get('MYSQL_PASSWD'),\n host='localhost')\n cur = con.cursor()\n cur.execute('CREATE DATABASE IF NOT EXISTS sc;')\n cur.execute('use sc;')\n cur.execute('DROP TABLE IF EXISTS names;')\n cur.execute('''\n CREATE TABLE names(\n id INT PRIMARY KEY AUTO_INCREMENT,\n url TEXT,\n filename TEXT,\n status CHAR(4),\n status2 CHAR(4),\n created DATETIME\n );\n ''')\n insert_sql = 'INSERT INTO `sc`.`names` (`url`, `filename`, `status`, `status2`, `created`) \\\n VALUES (%(url)s, %(fs)s, %(s)s,%(s2)s, now());'\n data = [{'url': BASE_URL+'q={}+{}+site:{}+filetype:PDF&tbs=qdr:y2&ie=UTF-8'\n .format(d1.replace(' ', '+'),\n re.sub(r'[,|&]',\n '',\n d2[1]).replace(' ', '+').replace('++', '+'),\n d3.replace(' ', '+')),\n 's': '0',\n 's2': '0',\n 'fs': './pdfs/{}/{}/{}/'.format(d1.replace(' ', '_'), d2[0],\n re.sub(r'\\.[a-z]{2,4}', '', d3).replace(' ', '_'))}\n for d1 in data[0] for d2 in data[1] for d3 in data[2]]\n\n cur.executemany(insert_sql, data)\n con.commit()\n cur.close\n con.close\n\n\ndef _make_dirs(data):\n for d1 in data[0]:\n for d2 in data[1]:\n for d3 in data[2]:\n os.makedirs('./pdfs/{}/{}/{}/'\n .format(d1.replace(' ', '_'), d2[0],\n re.sub(r'\\.[a-z]{2,4}', '', d3).replace(' ', '_')))\n\n\ndef _load(flag=False):\n if flag:\n with open('country.txt', 'r') as f1:\n country = f1.read()\n with open('sector.txt', 'r') as f2:\n sector = f2.read()\n with open('consulting.txt', 'r') as f3:\n consulting = f3.read()\n data1 = country.split('\\n')[:-1]\n data2 = [l.split(',') for l in sector.split('\\n')[1:-1]]\n data3 = consulting.split('\\n')[:-1]\n print('files loaded!')\n _insert_data((data1, data2, data3))\n print('urls complete!')\n _make_dirs((data1, data2, data3))\n print('dirs complete!')\n return\n else:\n con = MySQLdb.connect(user='root',\n passwd=os.environ.get('MYSQL_PASSWD'),\n host='localhost',\n db=\"sc\")\n cur = con.cursor()\n select_sql = 'SELECT url, filename \\\n FROM `sc`.`names` \\\n WHERE status != 404 \\\n AND (status2 != 404 AND status2 != 200 AND status2 != 111)'\n cur.execute(select_sql)\n urls = []\n fs = []\n for data in cur.fetchall():\n urls.append(data[0])\n fs.append(data[1])\n cur.close\n con.close\n return (urls, fs)\n\n\ndef prepare(flag=False):\n if flag:\n _load(flag=True)\n return\n\n urls, file_names = _load()\n return {'urls': urls, 'fs': file_names}\n\n\ndef get_ip():\n try:\n proxy_ip1 = os.environ.get('PROXY_IP1')\n res = requests.get(proxy_ip1)\n soup = bs(res.text, 'html.parser')\n hosts = [i.text for i in soup.findAll('td', attrs={'class', 'host'})]\n ports = [i.text for i in soup.findAll('td', attrs={'class', 'port'})]\n d1 = ['{}:{}'.format(d[0], d[1]) for d in zip(hosts, ports)]\n print('PROXY_IP1 done!')\n return d1\n except:\n d1 = []\n print('PROXY_IP1 error!')\n\n # try:\n # proxy_ip2 = os.environ.get('PROXY_IP2')\n # desired_capabilities = DesiredCapabilities.PHANTOMJS.copy()\n # 
desired_capabilities['phantomjs.page.customHeaders.User-Agent'] = os.environ.get('USER_AGENT')\n # driver = webdriver.PhantomJS(desired_capabilities=desired_capabilities)\n # driver.get(proxy_ip2)\n # html = driver.page_source\n # soup2 = bs(html, 'html.parser')\n # d2 = [el.previousSibling.previousSibling.text for el in soup2.findAll('td',\n # attrs={'class': 'A'})]\n # print('PROXY_IP2 done!')\n # except:\n # d2 = []\n # print('PROXY_IP2 error!')\n #\n # try:\n # proxy_ip3 = os.environ.get('PROXY_IP3')\n # driver.get(proxy_ip3)\n # html2 = driver.page_source\n # soup3 = bs(html2, 'html.parser')\n # evenE = [e.find('a').text+':'+e.find('a').findNext().text\n # for e in soup3.findAll('tr', attrs={'class', 'Even'})\n # if e.find('a')]\n # oddEl = [e.find('a').text+':'+e.find('a').findNext().text\n # for e in soup3.findAll('tr', attrs={'class', 'Odd'})\n # if e.find('a')]\n # d3 = evenE + oddEl\n # return d3\n # print('PROXY_IP3 done!')\n # except:\n # d3 = []\n # print('PROXY_IP3 error!')\n # return list(set(d1 + d2 + d3))\n\n\ndef validate():\n urls, fs = _load()\n # fs = ['./pdfs/{}/{}/{}'\n # .format(d1.replace(' ', '_'),\n # re.sub(r'[,|&]', '', d2).replace(' ', '_').replace('__', '_'),\n # re.sub(r'\\.[a-z]{2,4}', '', d3)\n # .replace(' ', '_')) for d1 in data[0] for d2 in data[1] for d3 in data[2]]\n output = {}\n for f in fs:\n pdf_files = glob.glob(f+'/*.pdf')\n json_files = glob.glob(f+'/*.json')\n jd = json.load(open(json_files[0], 'r')) if json_files else {}\n if len(jd.keys()) != len(pdf_files) and len(jd.keys()) > 0:\n output[f] = []\n for key in jd.keys():\n output[f].append(jd[key]['url'])\n with open('validation.json', 'w') as o:\n json.dump(output, o, ensure_ascii=False, indent=4, separators=(',', ': '))\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"107841359","text":"# Created by zty on 2019/2/20\nimport matplotlib\nmatplotlib.use('Agg')\nimport sys\nsys.path.append(\"/usr/local/python3/lib/python3.5/site-packages\")\nimport pandas as pd\nfrom my_functions import cal_region_methylation_level,timer\nimport os\nimport glob\nimport argparse\nimport uuid\nimport shutil\nimport multiprocessing\n\nchrList = ['chr20', 'chr10', 'chr2', 'chr5', 'chr9', 'chr6', 'chr3', 'chr19', 'chr13', 'chr7','chr15',\n 'chr4', 'chr21', 'chr8', 'chr12', 'chr16', 'chr17', 'chr18', 'chr11', 'chr1','chr14', 'chrX', 'chr22', 'chrY', 'chrM']\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"methylation level around TSS in different groups\")\n parser.add_argument(\"-d1\",default=\"/root/mnt/analysis/baidu_II/denovo_DMR\",help=\"metilene result dir\")\n parser.add_argument(\"-d2\",default=\"/root/mnt/analysis/baidu_II/WGBS_data\", help=\"CX report dir\")\n args = parser.parse_args()\n return args.d1,args.d2\n\n@timer\ndef process(sample_pair,dir2):\n print(\"开始处理%s\" % (sample_pair))\n samples = sample_pair.split(\"_\")\n DMR_df = pd.read_table(\"%s/%s_denovo_metilene_CG_qval.0.05.out\" % (sample_pair, sample_pair), header=None,\n names=[\"chr\", \"start\", \"end\", \"pvalue\", \"DMR diff\", \"CpG num\", \"methy T\", \"methy N\"],\n index_col=0, dtype={\"start\": int, \"end\": int})\n DMR_df[\"chr\"] = DMR_df.index\n DMR_df = DMR_df[[\"chr\", \"start\", \"end\"]]\n tumor_file = \"%s/%s/%s.CX_report.txt.simplified\" % (dir2, samples[0], samples[0])\n normal_file = \"%s/%s/%s.CX_report.txt.simplified\" % (dir2, samples[1], samples[1])\n 
tumor_df = pd.read_table(tumor_file, header=None, names=[\"chr\", \"loc\", \"meth\", \"unmeth\", \"pvalue\"], index_col=0,\n dtype={\"loc\": int})\n normal_df = pd.read_table(normal_file, header=None, names=[\"chr\", \"loc\", \"meth\", \"unmeth\", \"pvalue\"], index_col=0,\n dtype={\"loc\": int})\n tumor_wd = \"%s/%s\" % (dir2, samples[0])\n normal_wd = \"%s/%s\" % (dir2, samples[1])\n bed_sn = uuid.uuid4().hex\n cal_region_methylation_level(DMR_df, tumor_df, tumor_wd, samples[0], \"/root/mnt/analysis/baidu_II/bed/chr_bed\", bed_sn,\"DMR_region_methylation_level.txt\",chrList=chrList)\n cal_region_methylation_level(DMR_df, normal_df, normal_wd, samples[1], \"/root/mnt/analysis/baidu_II/bed/chr_bed\", bed_sn, \"DMR_region_methylation_level.txt\",chrList=chrList)\n ### 把中间bed文件删掉\n shutil.rmtree(\"/root/mnt/analysis/baidu_II/bed/chr_bed/%s\" % (bed_sn))\n print(\"%s处理完毕\" % (sample_pair))\n\ndef pipeline(dir1,dir2):\n os.chdir(dir1)\n pool = multiprocessing.Pool(processes=4)\n for sample_pair in glob.glob(\"*T_*N\"):\n if not os.path.isdir(sample_pair):\n continue\n pool.apply_async(process, args=(sample_pair,dir2,))\n pool.close()\n pool.join()\n\nif __name__ == \"__main__\":\n dir1,dir2 = parse_args()\n pipeline(dir1,dir2)\n","sub_path":"code_record/Baidu_WGBS_project/recalculate_denovo_DMR_methylation_level.py","file_name":"recalculate_denovo_DMR_methylation_level.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"326069131","text":"class Solution(object):\n def kthSmallest(self, root, k):\n \"\"\"\n :type root: TreeNode\n :type k: int\n :rtype: int\n \"\"\"\n self.count = 0\n\n def trav(node):\n if node is None:\n return\n val = trav(node.left)\n if val is not None:\n return val\n self.count += 1\n if self.count == k:\n return node.val\n val = trav(node.right)\n if val is not None:\n return val\n\n return trav(root)\n","sub_path":"kth_smallest_element_in_a_bst.py","file_name":"kth_smallest_element_in_a_bst.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"149396663","text":"\"\"\"\nHandler - функция, которая принимает на вход text (текст входящего сообщения) и context (dict), а возвращает bool:\nTrue, если шаг пройден, False если данные введены неправильно.\n\"\"\"\nimport re\n\nfrom generate_ticket import generate_ticket\n\nre_name = re.compile(r'^[\\w\\-\\s]{3,30}$')\nre_email = re.compile(r\"\\b[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\b\")\n\n\ndef handle_name(text, context):\n \"\"\"\n Check whether the name is correct or not and if it is - putting this in context dict\n\n :param text: name, that user entered\n :param context: dict with parameters\n :return: bool\n \"\"\"\n match = re.match(re_name, text)\n if match:\n context['name'] = text\n return True\n else:\n return False\n\n\ndef handle_email(text, context):\n \"\"\"\n Check whether the email is correct or not and if it is - putting this in context dict\n\n :param text: email, that user entered\n :param context: dict with parameters\n :return: bool\n \"\"\"\n matches = re.findall(re_email, text)\n if len(matches) > 0:\n context['email'] = matches[0]\n return True\n else:\n return False\n\n\ndef generate_ticket_handler(text, context):\n \"\"\"\n Generating the ticket\n\n :param text:\n :param context: dict with parameters for creating the ticket\n :return: ticket file\n \"\"\"\n return generate_ticket(name=context['name'], 
email=context['email'])\n","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"307019732","text":"# Документация - http://www.dabeaz.com/ply/ply.html\n\nimport ply.lex as lex\nimport ply.yacc as yacc\nimport sys\n\n# Список с названиями токенов\ntokens = [\n\n 'INT',\n 'FLOAT',\n 'NAME',\n 'PLUS',\n 'MINUS',\n 'DIVIDE',\n 'MULTIPLY',\n 'EQUALS'\n\n]\n\n# Регулярные выражения, которые описывают что находится в токенах\nt_PLUS = r'\\+'\nt_MINUS = r'\\-'\nt_MULTIPLY = r'\\*'\nt_DIVIDE = r'\\/'\nt_EQUALS = r'\\='\n\n# Специальная переменная t_ignore позволяет указать символы, которые будет игнорировать lex\n# Игнорим только пробелы, так как в перспективе будем работать с переменными.\nt_ignore = r' '\n\n# Описание сложных токенов, чья длинна может быть более одного символа.\n# указаны через функции.\n# Флот на первом месте, чтобы питон распознавал его первым, иначе могут быть неточности если мы сперва будет делать инт.\ndef t_FLOAT(t):\n r'\\d+\\.\\d+' # число с любым кол-вом знаков до разделителя точка, точка, любое число знаков после точки.\n t.value = float(t.value) #Лексема, сравнение текста.\n return t\n\n# Инт может быть более чем один символ.\ndef t_INT(t):\n r'\\d+'\n t.value = int(t.value)\n return t\n\n# NAME - это имя кастомной переменной. Может быть 1 или более символов в длинну.\n# Первый символ должен быть в промежутке от a-z или нижнее подчеркивание, остальные символы могут быть тоже буквами или нижним подчеркиванием или цифрой.\ndef t_NAME(t):\n r'[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = 'NAME'\n return t\n\n# Пропустить этот токен, чтобы не было ошибки и выдать сообщениеиспользуя специальную функцию Плай t_error.\ndef t_error(t):\n print(\"Неверное значение\")\n t.lexer.skip(1)\n\n# Создаем lexer.\nlexer = lex.lex()\n\n# Это специальная магия Ply для того чтобы указать парсеру в каком порядке нужно делать вычисления. Скопировано от сюда - https://www.dabeaz.com/ply/example.html\nprecedence = (\n\n ('left', 'PLUS', 'MINUS'),\n ('left', 'MULTIPLY', 'DIVIDE')\n\n)\n\n# Указываем грамматику согласно пункту 6.2 дока. 
Разрешаем expressions, var_assign и empty.\ndef p_calc(p):\n '''\n calc : expression\n | var_assign\n | empty\n '''\n print(run(p[1]))\n\ndef p_var_assign(p):\n '''\n var_assign : NAME EQUALS expression\n '''\n # Создается дерево парсера\n p[0] = ('=', p[1], p[3])\n\n# Правила выражений.\ndef p_expression(p):\n '''\n expression : expression MULTIPLY expression\n | expression DIVIDE expression\n | expression PLUS expression\n | expression MINUS expression\n '''\n # Сощдается дерево парсера.\n p[0] = (p[2], p[1], p[3])\n\ndef p_expression_int_float(p):\n '''\n expression : INT\n | FLOAT\n '''\n p[0] = p[1]\n\ndef p_expression_var(p):\n '''\n expression : NAME\n '''\n p[0] = ('var', p[1])\n\n# Все что не expressions, var_assign и empty - это ошибка.\n# p_error - это еще одна магия Ply.\ndef p_error(p):\n print(\"Синтаксическая ошибка.\")\n\ndef p_empty(p):\n '''\n empty :\n '''\n p[0] = None\n\n# Делаем парсер\nparser = yacc.yacc()\n\n# Словарь для хранения и извлечения переменных.\nenv = {}\n\n# Эту функция проходит по сгенерированному парсером дереву.\ndef run(p):\n global env\n if type(p) == tuple:\n if p[0] == '+':\n return run(p[1]) + run(p[2])\n elif p[0] == '-':\n return run(p[1]) - run(p[2])\n elif p[0] == '*':\n return run(p[1]) * run(p[2])\n elif p[0] == '/': \n return run(p[1]) / run(p[2])\n elif p[0] == '=':\n env[p[1]] = run(p[2])\n return ''\n elif p[0] == 'var':\n if p[1] not in env:\n return 'Undeclared variable found!'\n else:\n return env[p[1]]\n else:\n return p\n\n# Создаем интерфейс для взаимодействия с калькулятором. \nwhile True: \n try:\n s = input('Введите выражение: ')\n except EOFError:\n break \n parser.parse(s)","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"548947509","text":"from django import forms\n\n#from the models, we have this, (couchmodels.py)\n#flipping to tuple\nfrom django.forms import Form\nfrom corehq.apps.api.es import ReportCaseESView\nfrom dimagi.utils.parsing import json_format_date\nfrom pact.enums import PACT_HP_CHOICES, PACT_DOT_CHOICES, PACT_REGIMEN_CHOICES, GENDER_CHOICES, PACT_RACE_CHOICES, PACT_HIV_CLINIC_CHOICES, PACT_LANGUAGE_CHOICES, CASE_NONART_REGIMEN_PROP, CASE_ART_REGIMEN_PROP, DOT_ART, DOT_NONART\nfrom django.forms import widgets\nfrom pact.regimen import regimen_dict_from_choice\n\n\ndef get_hp_choices():\n from pact.reports.patient_list import PactPrimaryHPField\n return [(x['val'], x['text']) for x in PactPrimaryHPField.get_chws()]\n\n\nclass PactPatientForm(Form):\n \"\"\"\n DocumentForm\n \"\"\"\n pactid = forms.CharField(label=\"PACT ID\", required=True)\n\n first_name = forms.CharField(label=\"First Name\", required=True)\n middle_name = forms.CharField(label=\"Middle Name\", required=False)\n last_name = forms.CharField(label=\"Last Name\", required=True)\n\n gender = forms.ChoiceField(label=\"Sex\", choices=GENDER_CHOICES)\n #source: http://stackoverflow.com/questions/1513502/django-how-to-format-a-datefields-date-representation\n dob = forms.DateField(required=False, label='DOB (m/d/y)', input_formats=['%m/%d/%Y'], widget=forms.DateInput(format = '%m/%d/%Y', attrs={'class': 'jqui-dtpk'}))\n race = forms.ChoiceField(choices=PACT_RACE_CHOICES)\n preferred_language = forms.ChoiceField(choices=PACT_LANGUAGE_CHOICES)\n\n mass_health_expiration = forms.DateField(label = \"Mass Health expiration date (m/d/y)\", input_formats=['%m/%d/%Y', ''], widget=forms.DateInput(format = 
'%m/%d/%Y'), required=False)\n ssn = forms.CharField(label=\"Social Security Number\", required=False)\n\n hp = forms.ChoiceField(label=\"Primary health promoter\", choices=())\n\n hp_status = forms.ChoiceField(label=\"HP Status\", choices=PACT_HP_CHOICES, required=False)\n dot_status = forms.ChoiceField(label=\"DOT Status\", choices=PACT_DOT_CHOICES, required=False)\n artregimen = forms.ChoiceField(choices=PACT_REGIMEN_CHOICES, required=False)\n nonartregimen = forms.ChoiceField(choices=PACT_REGIMEN_CHOICES, required=False)\n hiv_care_clinic = forms.ChoiceField(choices=PACT_HIV_CLINIC_CHOICES)\n\n patient_notes = forms.CharField(widget = widgets.Textarea(attrs={'cols':80,'rows':5}), required=False)\n\n def __init__(self, request, casedoc, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.casedoc = casedoc\n self.fields['hp'].choices = get_hp_choices()\n self.case_es = ReportCaseESView(request.domain)\n for name, field in self.fields.items():\n if name == CASE_ART_REGIMEN_PROP:\n #these really should be a widget of some type\n #dereference the artregimen, dot_a_one...etc to become the comma separated regimen string for the form\n art_regimen_initial = self.casedoc.art_regimen_label_string()\n casedoc_value = art_regimen_initial\n elif name == CASE_NONART_REGIMEN_PROP:\n nonart_regimen_initial = self.casedoc.nonart_regimen_label_string()\n casedoc_value = nonart_regimen_initial\n else:\n casedoc_value = getattr(self.casedoc, name, '')\n field.initial = casedoc_value\n\n @property\n def clean_changed_data(self):\n #to be called after validation\n ret = {}\n for name, value in self.cleaned_data.items():\n #to verify that the regimens changed calculate the dict of the freq+label ids.\n if name == CASE_ART_REGIMEN_PROP:\n art_props = regimen_dict_from_choice(DOT_ART, value)\n if art_props != self.casedoc.art_properties():\n ret.update(art_props)\n elif name == CASE_NONART_REGIMEN_PROP:\n nonart_props = regimen_dict_from_choice(DOT_NONART, value)\n if nonart_props != self.casedoc.nonart_properties():\n ret.update(nonart_props)\n else:\n if getattr(self.casedoc, name, '') != value:\n ret[name] = value\n\n # hack, if any of the names, change remake the name and initials\n name_changed = False\n if 'first_name' in list(ret.keys()):\n name_changed = True\n first_name = ret['first_name']\n else:\n first_name = self.casedoc.first_name\n\n if 'last_name' in list(ret.keys()):\n name_changed = True\n last_name = ret['last_name']\n else:\n last_name = self.casedoc.last_name\n\n if name_changed:\n ret['name'] = '%s %s' % (first_name, last_name)\n ret['initials'] = '%s%s' % (first_name[0] if len(first_name) > 1 else '', last_name[0] if len(last_name) > 0 else '')\n\n return ret\n\n def clean_dob(self):\n if self.cleaned_data['dob'] is not None:\n return json_format_date(self.cleaned_data['dob'])\n else:\n return None\n\n def clean_mass_health_expiration(self):\n if self.cleaned_data['mass_health_expiration'] is not None:\n return json_format_date(self.cleaned_data['mass_health_expiration'])\n else:\n return None\n","sub_path":"custom/_legacy/pact/forms/patient_form.py","file_name":"patient_form.py","file_ext":"py","file_size_in_byte":5269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"139023850","text":"# Задание\n# мне нужно отыскать файл среди десятков других\n# я знаю некоторые части этого файла (на память или из другого источника)\n# я ищу только среди .sql файлов\n# 1. 
программа ожидает строку, которую будет искать (input())\n# после того, как строка введена, программа ищет её во всех файлах\n# выводит список найденных файлов построчно\n# выводит количество найденных файлов\n# 2. снова ожидает ввод\n# поиск происходит только среди найденных на этапе 1\n# 3. снова ожидает ввод\n# ...\n# Выход из программы программировать не нужно.\n# Достаточно принудительно остановить, для этого можете нажать Ctrl + C\n\n# Пример на настоящих данных\n\n# python3 find_procedure.py\n# Введите строку: INSERT\n# ... большой список файлов ...\n# Всего: 301\n# Введите строку: APPLICATION_SETUP\n# ... большой список файлов ...\n# Всего: 26\n# Введите строку: A400M\n# ... большой список файлов ...\n# Всего: 17\n# Введите строку: 0.0\n# Migrations/000_PSE_Application_setup.sql\n# Migrations/100_1-32_PSE_Application_setup.sql\n# Всего: 2\n# Введите строку: 2.0\n# Migrations/000_PSE_Application_setup.sql\n# Всего: 1\n\n# не забываем организовывать собственный код в функции\n\nimport os\n\nmigrations = 'Migrations'\n# current_dir = os.path.dirname(os.path.abspath(__file__))\n\nprint('-' * 40)\n\ndef find_file(files_list = []):\n n = 0\n text = input('Введите текст для поиска:')\n if files_list == []:\n os.chdir('/Users/azarovdn/Desktop/Netology/03_Python/08_Work_whth_path/Migrations')\n for files in os.listdir(\"/Users/azarovdn/Desktop/Netology/03_Python/08_Work_whth_path/Migrations/\"):\n file_inside = True\n if files.endswith(\".sql\"):\n with open(files) as file:\n for line in file:\n if text in line and file_inside is True:\n files_list.append(files)\n n += 1\n file_inside = False\n\n else:\n files_list_doubler = []\n for files in files_list:\n with open(files) as file:\n file_inside = True\n for line in file:\n if text in line and file_inside is True:\n files_list_doubler.append(files)\n file_inside = False\n n += 1\n files_list = files_list_doubler\n\n if files_list == []:\n print('Файлы не обнаружены')\n return\n elif len(files_list) > 10:\n print('... 
большой список файлов ...')\n        print('Всего: {}'.format(len(files_list)))\n        find_file(files_list)\n    elif len(files_list) > 1:\n        print('Всего: {}'.format(len(files_list)))\n        for file in files_list:\n            print('{}/{}'.format(migrations,file))\n        find_file(files_list)\n    elif len(files_list) == 1:\n        print('{}/{}'.format(migrations,files_list[0]))\n        return\n\n\nfind_file()\n\n\n\n# if __name__ == '__main__':\n#     pass","sub_path":"find_procedure.py","file_name":"find_procedure.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"593126213","text":"from keras import layers\nfrom keras import models\nfrom keras import Input\n\n\nvocabularySize = 50000\nnumIncomeGroups = 10\n\npostsInput = Input(shape=(None, ), dtype='int32', name='posts')\nembeddedPosts = layers.Embedding(vocabularySize, 256)(postsInput)\n\nx = layers.Conv1D(128, 5, activation='relu')(embeddedPosts)\nx = layers.MaxPooling1D(5)(x)\nx = layers.Conv1D(256, 5, activation='relu')(x)\nx = layers.Conv1D(256, 5, activation='relu')(x)\nx = layers.MaxPooling1D(5)(x)\nx = layers.Conv1D(256, 5, activation='relu')(x)\nx = layers.Conv1D(256, 5, activation='relu')(x)\nx = layers.GlobalMaxPooling1D()(x)\nx = layers.Dense(128, activation='relu')(x)\n\nagePrediction = layers.Dense(1, name='age')(x)\nincomePrediction = layers.Dense(numIncomeGroups, activation='softmax', name='income')(x)\ngenderPrediction = layers.Dense(1, activation='sigmoid', name='gender')(x)\n\nmodel = models.Model(postsInput, [agePrediction, incomePrediction, genderPrediction])\n\n# When compiling the model, we should specify different loss functions for every output.\n# Resulting loss values are summed into a global loss which is minimized during training.\n# Imbalanced loss contributions may make the model optimize preferentially for the task whose loss\n# values are bigger, so we should assign weights to these loss functions according to their range values.\nmodel.compile(optimizer='rmsprop',\n              loss={'age': 'mse',\n                    'income': 'categorical_crossentropy',\n                    'gender': 'binary_crossentropy'},\n              loss_weights={'age': 0.25,\n                            'income': 1.0,\n                            'gender': 10.0}\n              )\n\n\n","sub_path":"Examples/FunctionalKeras/posts_multi_output_ex.py","file_name":"posts_multi_output_ex.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"507741928","text":"from LoadData import *\nfrom lasagne import layers\nfrom lasagne.updates import nesterov_momentum, rmsprop\nfrom nolearn.lasagne import NeuralNet\nimport matplotlib.pyplot as plt\n\nnet1 = NeuralNet(\n\tlayers=[ #Three layers, one hidden\n\t\t('input', layers.InputLayer),\n\t\t('hidden', layers.DenseLayer),\n\t\t('output', layers.DenseLayer),\n\t],\n\t#Layer parameters:\n\tinput_shape=(None, 9216), #96x96 input pixels per batch\n\thidden_num_units=100, #100 hidden units\n\toutput_nonlinearity=None, #output layer uses identity function\n\toutput_num_units=30, #30 target values\n\n\t#Optimization method\n\tupdate=nesterov_momentum,\n\tupdate_learning_rate=0.01,\n\tupdate_momentum=0.9,\n\t#update=rmsprop,\n\t#update_learning_rate=0.01,\n\t#update_rho=0.9,\n\t#update_epsilon=1e-06,\n\n\tregression=True, #we are doing regression, not classification\n\tmax_epochs=100, #number of epochs to train\n\tverbose=1,\n\t)\n\nX,y = load()\nnet1.fit(X, y)\n\nimport cPickle as pickle\nwith open('net1.pickle', 'wb') as f:\n    pickle.dump(net1, f, -1)\n\ntrain_loss =
np.array([i[\"train_loss\"] for i in net1.train_history_])\nvalid_loss = np.array([i[\"valid_loss\"] for i in net1.train_history_])\nplt.plot(train_loss, linewidth=3, label=\"train\")\nplt.plot(valid_loss, linewidth=3, label=\"valid\")\nplt.grid()\nplt.legend()\nplt.xlabel(\"epoch\")\nplt.ylabel(\"loss\")\nplt.ylim(1e-3,1e-2)\nplt.yscale('log')\nplt.show()\n\nX,_ = load(test=True)\ny_pred = net1.predict(X)\nfrom plot_sample import *\ncreate_plot(X, y_pred)\n","sub_path":"singleLayer.py","file_name":"singleLayer.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"537352022","text":"import math\nimport random \nfrom turtle import Turtle\n\n# the grammar: U (up), D (down), L (left), R (right)\n# generating each with an equal probability\n# later, the idea should be to do a random walk, i.e. with a rotation of a particular angle\n# at that point the grammar should maybe have tokens for move and for rotate\n\n# all moves are the same length\n\n# tokens = [\"U\", \"D\", \"L\", \"R\"]\ntokens = [90, 270, 180, 0]\n\n# l = length of path to gen\ndef squareWalkGen( l):\n\ttoRet = [];\n\tfor i in range( l):\t\n\t\ttoRet += [tokens[ random.randint(0, len( tokens) - 1)]]\n\treturn toRet\n\ndef walkAndDraw( path, t, stepLen):\n\tt.down()\n\n\tfor dir in path:\n\t\tt.setheading( dir)\n\t\tt.forward( stepLen)\n\n\ndef main():\n\tstepLen = 30\n\tpathLen = 100\n\n\tt = Turtle()\n\tt.pencolor(\"red\")\n\tt.screen.bgcolor(\"black\")\n\n\tp = squareWalkGen( pathLen)\n\n\twalkAndDraw( p, t, stepLen)\n\tt.hideturtle()\n\n\tinput()\n\nmain()\n\n\n","sub_path":"squareWalk.py","file_name":"squareWalk.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"361291156","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\nimport os\nfrom multiprocessing import Pool\nimport re\nimport random\n\nimport numpy\nfrom tqdm import tqdm\nfrom utils import remove_punct\nfrom functools import partial\nfrom glob import glob\nfrom pydub import AudioSegment\n\nmax_duration = 10 * 1000\n\ndef to_list(chunk):\n text = []\n for line in chunk:\n filename = line.split(\" \")[1]\n t = \" \".join(line.strip().split(\" \")[3:])\n\n if len(text) != 0:\n silence = AudioSegment.silent(500, 16000)\n segment = segment.append(silence, crossfade=50)\n\n next_chunk = AudioSegment.from_file(filename)\n segment = segment.append(next_chunk, crossfade=50) \n else:\n segment = AudioSegment.from_file(filename)\n text.append(t.strip())\n file_id = line.split(\" \")[0]\n audio_path = \"/\".join(filename.split(\"/\")[:-3])\n filename = f\"{audio_path}/ami_combined/{file_id}.flac\"\n segment = segment.set_sample_width(2)\n segment = segment.set_frame_rate(16000)\n segment.export(filename, format=\"flac\")\n dur = len(segment)\n text = \" \".join(text).strip()\n \n return f\"{file_id} {filename} {dur}.0 {text}\\n\"\n \n\ndef prepare_ami(audio_path, text_path, lists_path, processes):\n train_file = f\"{lists_path}/ami-combined-train.lst\"\n test_file = f\"{lists_path}/ami-combined-test.lst\"\n\n if not os.path.exists(train_file) or not os.path.exists(test_file):\n train = []\n test = []\n for f in ['dev', 'train']:\n for d in ['mdm', 'ihm', 'sdm']:\n with open(os.path.join(lists_path, f\"ami-{d}-{f}.lst\")) as lst:\n for line in lst:\n dur = float(line.strip().split(\" \")[2])\n if dur < max_duration / 2:\n 
train.append(line)\n\n for f in ['test']:\n for d in ['mdm', 'ihm', 'sdm']:\n with open(os.path.join(lists_path, f\"ami-{d}-{f}.lst\")) as lst:\n for line in lst:\n dur = float(line.strip().split(\" \")[2])\n if dur < max_duration / 2:\n test.append(line)\n \n random.shuffle(train)\n random.shuffle(test)\n\n train_chunks = []\n test_chunks = []\n curr_chunk = []\n total_dur = 0\n while len(train) > 0:\n line = train.pop(0)\n dur = float(line.strip().split(\" \")[2])\n if total_dur > max_duration:\n train_chunks.append(curr_chunk)\n curr_chunk = []\n total_dur = 0\n total_dur += dur\n curr_chunk.append(line)\n \n if len(curr_chunk) > 0:\n train_chunks.append(curr_chunk)\n curr_chunk = []\n total_dur = 0\n \n while len(test) > 0:\n line = test.pop(0)\n dur = float(line.strip().split(\" \")[2])\n if total_dur > max_duration:\n test_chunks.append(curr_chunk)\n curr_chunk = []\n total_dur = 0\n total_dur += dur\n curr_chunk.append(line)\n\n if len(curr_chunk) > 0:\n test_chunks.append(curr_chunk)\n curr_chunk = []\n \n with Pool(processes) as p: \n test_data = list(\n tqdm(\n p.imap(to_list, test_chunks),\n total=len(test_chunks),\n )\n )\n\n with Pool(processes) as p: \n train_data = list(\n tqdm(\n p.imap(to_list, train_chunks),\n total=len(train_chunks),\n )\n )\n\n\n with open(train_file, \"w\") as lst, open(test_file, \"w\") as lst_test:\n lst.writelines(train_data)\n lst_test.writelines(test_data)\n \n print(\"Prepared Combined AMI\", flush=True)\n\n\nif __name__ == \"__main__\":\n parser=argparse.ArgumentParser(description=\"Combined Dataset creation.\")\n parser.add_argument(\n \"--dst\",\n help=\"destination directory where to store data\",\n default=\"./data_dir\",\n )\n\n parser.add_argument(\n \"-p\",\n \"--process\",\n help=\"number of process for multiprocessing\",\n default=8,\n type=int,\n )\n\n args=parser.parse_args()\n\n audio_path=os.path.join(args.dst, \"audio\")\n text_path=os.path.join(args.dst, \"text\")\n lists_path=os.path.join(args.dst, \"lists\")\n os.makedirs(f\"{audio_path}/ami_combined\", exist_ok=True)\n os.makedirs(text_path, exist_ok=True)\n os.makedirs(lists_path, exist_ok=True)\n\n prepare_ami(audio_path, text_path, lists_path, args.process)\n\n","sub_path":"recipes/data/combined/combine_ami.py","file_name":"combine_ami.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"472590570","text":"from django.test import TestCase, Client\nfrom django.urls import reverse\nfrom users.models import User\nfrom ..models import Product\nfrom .test_resources import datasets\nfrom products.tests.factory import ProductsFactory\n\n\nclass ProductTestCases(TestCase):\n def setUp(self):\n self.user_1 = User.objects.create_user('testuser1', 'user_1@test.com', 'lubieplacki')\n self.user_2 = User.objects.create_user('testuser2', 'user_2@test.com', 'lubieplacki')\n self.client.login(email='user_1@test.com', password='lubieplacki')\n self.products_count = Product.objects.all().count\n self.datasets = datasets\n\n def test_if_no_orphan_product_is_left_after_owner_deletion(self):\n self.assertEqual(self.products_count(), 0)\n product = ProductsFactory(owner=self.user_1)\n product_pk = product.pk\n self.assertEquals(self.products_count(), 1)\n owner = User.objects.get(username='testuser1')\n owner_pk = owner.pk\n self.assertEqual(owner.pk, Product.objects.get(pk=product.pk).owner_id)\n owner.delete()\n self.assertRaises(User.DoesNotExist, User.objects.get, pk=owner_pk)\n 
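        # Note: the orphan-product guarantee exercised above comes from the FK's
        # on_delete rule. A minimal sketch of the assumed relation (field names are
        # illustrative; the real Product model lives in products/models.py and is
        # not shown in this file):
        #
        #     class Product(models.Model):
        #         owner = models.ForeignKey(User, on_delete=models.CASCADE)
        #
        # With CASCADE, owner.delete() also removes the product row, so the
        # following lookup must raise Product.DoesNotExist.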
self.assertRaises(Product.DoesNotExist, Product.objects.get, pk=product_pk)\n\n    def test_if_existing_product_is_deleted(self):\n        self.assertEqual(self.products_count(), 0)\n        product = ProductsFactory(owner=self.user_1)\n        self.assertEqual(self.products_count(), 1)\n        self.client.post(reverse('delete_product', args=(product.pk,)))\n        self.assertEqual(self.products_count(), 0)\n\n    def test_that_user_not_owning_the_product_cannot_delete_it(self):\n        product = ProductsFactory(owner=self.user_1)\n        self.client.logout()\n        self.client.login(email='user_2@test.com', password='lubieplacki')\n        response = self.client.post(reverse('delete_product', args=(product.pk,)))\n        self.assertEqual(response.status_code, 404)\n        self.assertTrue(Product.objects.filter(pk=product.pk).exists())\n\n    def test_that_delete_request_made_after_successful_product_deletion_raises_404(self):\n        product = ProductsFactory(owner=self.user_1)\n        product_pk = product.pk\n        self.client.post(reverse('delete_product', args=(product.pk,)))\n        self.assertRaises(Product.DoesNotExist, Product.objects.get, pk=product_pk)\n        response = self.client.post(reverse('delete_product', args=(product_pk,)))\n        self.assertEqual(response.status_code, 404)\n","sub_path":"products/tests/test_delete_product.py","file_name":"test_delete_product.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"277713944","text":"# Make a class called Person. Make the __init__() method take firstname, lastname, and age as parameters and add them\n# as attributes. Make another method called talk() which prints a greeting from the person containing,\n# for example, something like this: “Hello, my name is Carl Johnson and I’m 26 years old”.\n\nclass Person:\n    def __init__(self, firstname, lastname, age):\n        self.firstname = firstname\n        self.valid_name()\n        self.lastname = lastname\n        self.valid_surname()\n        self.age = age\n        self.valid_age()\n\n    def valid_name(self):\n        if self.firstname.isalpha():\n            return self.firstname\n        else:\n            raise ValueError('The first name must consist of letters only!')\n\n    def valid_surname(self):\n        if self.lastname.isalpha():\n            return self.lastname\n        else:\n            raise ValueError('The last name must consist of letters only!')\n\n    def valid_age(self):\n        if type(self.age) is not int:\n            raise ValueError('Age must be given as a number!')\n        if self.age < 0 or self.age > 130:\n            raise ValueError('Age must be in the range 0-130 years!')\n        else:\n            return self.age\n\n    def talk(self):\n        print(f'Hello! My name is {self.firstname} {self.lastname} and I am {self.age} years old')\n\n\ndef main():\n    firstname = 'Misha'\n    lastname = 'Barkov'\n    age = 19\n    user = Person(firstname, lastname, age)\n    user.talk()\n\n\nif __name__ == '__main__':\n    try:\n        main()\n    except ValueError as message:\n        print(message)\n","sub_path":"Task1.py","file_name":"Task1.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"492297493","text":"from nltk import word_tokenize\nimport nltk\nimport sys,re\nimport itertools\nfrom nltk.corpus import brown\n\nlist2=[\"its\",\"it's\",\"they're\",\"their\",\"you're\",\"your\",\"to\",\"too\",\"loose\",\"lose\",\"It's\",\"Its\",\"They're\",\"Their\",\"To\",\"Too\",\"Loose\",\"Lose\"]\ni=0\nlist_new=[]\noutfile= sys.argv[1]\n\nfoname=open(outfile,'w')\nlist_new=brown.tagged_words() \nfor i in range(0,len(list_new)):\n    l1= list(list_new[i])\n    #if l1[0] in list2:\n    foname.write(l1[0])\n    foname.write(\"/\")\n    foname.write(l1[1])\n    foname.write(\" \")\n    \nfoname.close()\n\n\n","sub_path":"src/collect_data2.py","file_name":"collect_data2.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"481963777","text":"\n\nimport time\nimport datetime\nfrom datetime import timedelta\n#from random import shuffle\nimport copy\nfrom global_yard import g_dev\nimport ephem\nimport build_tycho as tycho\nimport config\nimport shelve\n#from pprint import pprint\nimport ptr_utility\nimport redis\nimport math\nfrom pprint import pprint\nimport shutil\nimport os\nimport imp\nimport ptr_events\n\n'''\nAutofocus NOTE 20200122\n\nAs a general rule the focus is stable (temp). So when code (re)starts, compute and go to that point (filter).\n\nNautical or astronomical dark, and time of last focus > 2 hours or delta-temp > ?1C, then schedule an\nautofocus. Presumably the system is near the bottom of the focus parabola, but it may not be.\n\nPick a ~7mag focus star at an Alt of about 60 degrees, generally in the South. Later on we can start\nchoosing and logging a range of altitudes so we can develop adj_focus(temp, alt, flip_side).\n\nTake central image, move in 1x and expose, move out 2x then in 1x and expose, solve the equation and\nthen finish with a check exposure.\n\nNow there are cases if for some reason the telescope is not near the focus: first the minimum is at one end\nof a linear series. From that series and the image diameters we can imply where the focus is, subject to\nseeing induced errors. If either case occurs, go to the projected point and try again.\n\nA second case is the focus is WAY off, and or pointing. Make appropriate adjustments and try again.\n\nThe third case is we have a minimum. Inspection of the FWHM may imply seeing is poor. 
In that case\ndouble the exposure and possibly do a 5-point fit rather than a 3-point.\n\nNote at the last exposure it is reasonable to do a minor recalibrate of the pointing.\n\nOnce we have fully automatic observing it might make sense to do a more full range test of the focus mechanism\nand or visit more altitudes and temeperatures.\n\n\n\n1) Implement mag 7 star selection including getting that star at center of rotation.\n\n2) Implement using Sep to reliably find that star.\n\n3) change use of site config file.\n\n4) use common settings for sep\n\n\n'''\n\n\ndef fit_quadratic(x, y):\n #From Meeus, works fine.\n #Abscissa arguments do not need to be ordered for this to work.\n #NB Single alpha variable names confict with debugger commands, so bad practce.\n if len(x) == len(y):\n p = 0\n q = 0\n r = 0\n s = 0\n t = 0\n u = 0\n v = 0\n for i in range(len(x)):\n p += x[i]\n q += x[i]**2\n r += x[i]**3\n s += x[i]**4\n t += y[i]\n u += x[i]*y[i]\n v += x[i]**2*y[i]\n n = len(x)\n d = n*q*s +2*p*q*r - q*q*q - p*p*s - n*r*r\n a = (n*q*v + p*r*t + p*q*u - q*q*t - p*p*v - n*r*u)/d\n b = (n*s*u + p*q*v + q*r*t - q*q*u - p*s*t - n*r*v)/d\n c = (q*s*t + q*r*u + p*r*v - q*q*v - p*s*u - r*r*t)/d\n print('Quad; ', a, b, c)\n try:\n return (a, b, c, -b/(2*a))\n except:\n return (a, b, c)\n else:\n return None\n\ndef bin_to_string(use_bin):\n if use_bin == 1:\n return '1, 1'\n if use_bin == 2:\n return '2, 2'\n if use_bin == 3:\n return '3, 3'\n if use_bin == 4:\n return '4, 4'\n if use_bin == 5:\n return'5, 5'\n else:\n return '1, 1'\n\ndef ra_fix(ra):\n while ra >= 24:\n ra -= 24\n while ra < 0:\n ra +=24\n return ra\n\ndef ra_dec_fix_hd(ra, dec):\n if dec > 90:\n dec = 180 - dec\n ra -= 12\n if dec < -90:\n dec = -180 - dec\n ra += 12\n if ra >= 24:\n ra -= 24\n if ra < 0:\n ra += 24\n return ra, dec\n\nclass Sequencer:\n\n def __init__(self, driver: str, name: str, config: dict, astro_events):\n self.name = name\n self.astro_events = astro_events\n self.config = config\n\n g_dev['seq'] = self\n self.connected = True\n self.description = \"Sequencer for script execution.\"\n self.sequencer_hold = False\n self.sequencer_message = '-'\n print(\"sequencer connected.\")\n print(self.description)\n redis_ip = config['redis_ip']\n\n if redis_ip is not None:\n self.redis_server = redis.StrictRedis(host=redis_ip, port=6379, db=0,\n decode_responses=True)\n self.redis_wx_enabled = True\n else:\n self.redis_wx_enabled = False\n self.sky_guard = False\n self.af_guard = False\n self.block_guard = False\n self.time_of_next_slew = time.time()\n #NB NB These should be set up from config once a day at Noon/Startup time\n self.bias_dark_latch = True #NB NB NB Should these initially be defined this way?\n self.sky_flat_latch = True\n self.morn_sky_flat_latch = True\n self.morn_bias_dark_latch = True #NB NB NB Should these initially be defined this way?\n #breakpoint()\n self.reset_completes()\n\n try:\n self.is_in_completes(None)\n except:\n self.reset_completes()\n\n\n\n def get_status(self):\n status = {\n \"active_script\": None,\n \"sequencer_busy\": False\n }\n #20211026 I think this is causing problems. WER\n # if not self.sequencer_hold: # NB THis should be wrapped in a timeout.\n # if g_dev['obs'].status_count > 3: #Gove syste time to settle.\n # self.manager() # There be dragons here! 
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n return status\n\n\n\n\n\n def parse_command(self, command):\n req = command['required_params']\n opt = command['optional_params']\n g_dev['cam'].user_id = command['user_id']\n g_dev['cam'].user_name = command['user_name']\n action = command['action']\n script = command['required_params']['script']\n if action == \"run\" and script == 'focusAuto':\n self.auto_focus_script(req, opt)\n elif action == \"run\" and script == 'focusFine':\n self.coarse_focus_script(req, opt)\n elif action == \"run\" and script == 'genScreenFlatMasters':\n self.screen_flat_script(req, opt)\n elif action == \"run\" and script == 'genSkyFlatMasters':\n self.sky_flat_script(req, opt)\n elif action == \"run\" and script in ['32TargetPointingRun', 'pointingRun', 'makeModel']:\n if req['gridType'] == 'sweep':\n self.equatorial_pointing_run(req, opt)\n elif req['gridType'] == 'cross':\n self.cross_pointing_run(req, opt)\n else:\n self.sky_grid_pointing_run(req, opt)\n elif action == \"run\" and script in (\"genBiasDarkMaster\", \"genBiasDarkMasters\"):\n self.bias_dark_script(req, opt, morn=True)\n elif action == \"run\" and script == 'takeLRGBStack':\n self.take_lrgb_stack(req, opt)\n elif action == \"run\" and script == \"takeO3HaS2N2Stack\":\n self.take_lrgb_stack(req, opt)\n elif action.lower() in [\"stop\", \"cancel\"]:\n self.stop_command(req, opt)\n elif action == \"home\":\n #breakpoint()\n self.home_command(req, opt)\n elif action == 'run' and script == 'findFieldCenter':\n g_dev['mnt'].go_command(req, opt, calibrate=True, auto_center=True)\n elif action == 'run' and script == 'calibrateAtFieldCenter':\n g_dev['mnt'].go_command(req, opt, calibrate=True, auto_center=False)\n else:\n print('Sequencer command: ', command, ' not recognized.')\n\n def enc_to_skyflat_and_open(self ,enc_status, ocn_status, no_sky=False):\n #ocn_status = eval(self.redis_server.get('ocn_status'))\n #NB 120 is enough time to telescope to get pointed to East\n self.time_of_next_slew = time.time() -1 #Set up so next block executes if unparked.\n if g_dev['mnt'].mount.AtParK:\n g_dev['mnt'].unpark_command({}, {}) # Get there early\n time.sleep(3)\n self.time_of_next_slew = time.time() + 120 #NB 120 is enough time to telescope to get pointed to East\n if not no_sky:\n g_dev['mnt'].slewToSkyFlatAsync()\n #This should run once. 
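        # Aside: self.time_of_next_slew implements a simple rate limiter. A sketch
        # of the pattern, using only names already defined in this method:
        #
        #     if time.time() >= self.time_of_next_slew:
        #         g_dev['mnt'].slewToSkyFlatAsync()           # chase the moving flat spot
        #         self.time_of_next_slew = time.time() + 120  # earliest allowed re-slew
        #
        # so the mount is re-aimed at most once per 120 s no matter how often the
        # status loop calls back into this phase.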
Next time this phase is entered in > 120 seconds we\n #flat_spot, flat_alt = g_dev['evnt'].flat_spot_now()\n\n\n if time.time() >= self.time_of_next_slew:\n #We slew to anti-solar Az and reissue this command every 120 seconds\n flat_spot, flat_alt = g_dev['evnt'].flat_spot_now()\n try:\n if not no_sky:\n g_dev['mnt'].slewToSkyFlatAsync()\n time.sleep(10)\n print(\"Open and slew Dome to azimuth opposite the Sun: \", round(flat_spot, 1))\n\n if enc_status['shutter_status'] in ['Closed', 'closed'] \\\n and ocn_status['hold_duration'] <= 0.1: #NB\n #breakpoint()\n g_dev['enc'].open_command({}, {})\n print(\"Opening dome, will set Synchronize in 10 seconds.\")\n time.sleep(10)\n g_dev['enc'].sync_mount_command({}, {})\n #Prior to skyflats no dome following.\n self.dome_homed = False\n self.time_of_next_slew = time.time() + 60 # seconds between slews.\n except:\n pass#\n return\n\n def park_and_close(self, enc_status):\n try:\n if not g_dev['mnt'].mount.AtParK: ###Test comment here\n g_dev['mnt'].park_command({}, {}) # Get there early\n except:\n print(\"Park not executed during Park and Close\" )\n try:\n if enc_status['shutter_status'] in ['open', ]:\n g_dev['enc'].close_command( {}, {})\n except:\n print('Dome close not executed during Park and Close.')\n\n # def archive_cull(self):\n # FORTNIGHT=60*60*24*7*2\n # #dir_path='D:/PTRMFO/'\n\n # dir_path=self.config['client_path'] + '\\\\' + 'archive'\n # cameras=[d for d in os.listdir(dir_path) if os.path.isdir(d)]\n # for camera in cameras: # Go through each camera directory\n # print (\"*****************************************\")\n # print (\"Camera: \" + str(camera))\n # timenow_cull=time.time()\n # cameradir=dir_path + '\\\\' + camera + '\\\\'\n # directories=[d for d in os.listdir(cameradir) if os.path.isdir(d)]\n # deleteDirectories=[]\n # deleteTimes=[]\n # for q in range(len(directories)):\n # if ((timenow_cull)-os.path.getmtime(cameradir + directories[q])) > FORTNIGHT:\n # deleteDirectories.append(directories[q])\n # deleteTimes.append(((timenow_cull)-os.path.getmtime(cameradir +directories[q])) /60/60/24/7)\n\n\n\n # print (\"These are the directories earmarked for \")\n # print (\"Eternal destruction. And how old they are\")\n # print (\"in weeks\\n\")\n # g_dev['obs'].send_to_user(\"Culling \" + str(len(deleteDirectories)) +\" from the local archive.\", p_level='INFO')\n # for entry in range(len(deleteDirectories)):\n # print (deleteDirectories[entry] + ' ' + str(deleteTimes[entry]) + ' weeks old.')\n # #shutil.rmtree(cameradir + deleteDirectories[entry]) # THIS IS THE DELETER WHEN WE ARE READY!\n\n # return\n\n ###############################\n # Sequencer Commands and Scripts\n ###############################\n def manager(self):\n '''\n This is called by the update loop. Call from local status probe was removed\n #on 20211026 WER\n\n This is where scripts are automagically started. Be careful what you put in here if it is\n going to open the dome or move the telescope at unexpected times.\n\n Scripts must not block too long or they must provide for periodic calls to check status.\n '''\n\n # NB Need a better way to get all the events.\n if g_dev['obs'].status_count < 3:\n return\n obs_win_begin, sunZ88Op, sunZ88Cl, ephem_now = self.astro_events.getSunEvents()\n ocn_status = g_dev['ocn'].status\n enc_status = g_dev['enc'].status\n events = g_dev['events']\n #g_dev['obs'].update_status() #NB NEED to be sure we have current enclosure status. 
Blows recursive limit\n self.current_script = \"No current script\" #NB this is an unused remnant I think.\n #if True or #Note this runs in Manual Mode as well.\n\n if self.bias_dark_latch and ((events['Eve Bias Dark'] <= ephem_now < events['End Eve Bias Dark']) and \\\n self.config['auto_eve_bias_dark'] and g_dev['enc'].mode == 'Automatic' ):\n self.bias_dark_latch = False\n # MTF - this was just to interject a focus call early in the night for testing. If it is later than Oct 2022, delete this.\n #focus_star = tycho.dist_sort_targets(g_dev['mnt'].current_icrs_ra, g_dev['mnt'].current_icrs_dec, \\\n # g_dev['mnt'].current_sidereal)\n #print (focus_star)\n req = {'bin1': False, 'bin2': True, 'bin3': False, 'bin4': False, 'numOfBias': 45, \\\n 'numOfDark': 15, 'darkTime': 180, 'numOfDark2': 3, 'dark2Time': 360, \\\n 'hotMap': True, 'coldMap': True, 'script': 'genBiasDarkMaster', }\n opt = {}\n #No action needed on the enclosure at this level\n self.park_and_close(enc_status)\n #NB The above put dome closed and telescope at Park, Which is where it should have been upon entry.\n self.bias_dark_script(req, opt, morn=False)\n self.bias_dark_latch = False\n\n #elif ( (events['End Morn Bias Dark'] + 60 * ephem.minute) <= ephem_now < events['Midday archive Cull']):\n # self.midday_cull()\n # g_dev['obs'].send_to_user(\"Culling the local archive.\", p_level='INFO')\n # print (\"Cull routine\")\n\n\n elif ((g_dev['events']['Cool Down, Open'] <= ephem_now < g_dev['events']['Eve Sky Flats']) and \\\n g_dev['enc'].mode == 'Automatic') and not g_dev['ocn'].wx_hold:\n\n self.enc_to_skyflat_and_open(enc_status, ocn_status)\n\n elif self.sky_flat_latch and ((events['Eve Sky Flats'] <= ephem_now < events['End Eve Sky Flats']) \\\n and g_dev['enc'].mode in [ 'Automatic', 'Autonomous'] and not g_dev['ocn'].wx_hold and \\\n self.config['auto_eve_sky_flat']):\n\n self.enc_to_skyflat_and_open(enc_status, ocn_status) #Just in case a Wx hold stopped opening\n self.current_script = \"Eve Sky Flat script starting\"\n #print('Skipping Eve Sky Flats')\n self.sky_flat_script({}, {}, morn=False) #Null command dictionaries\n self.sky_flat_latch = False\n\n elif enc_status['enclosure_mode'] in ['Autonomous!', 'Automatic'] and (events['Observing Begins'] <= ephem_now \\\n < events['Observing Ends']) and not g_dev['ocn'].wx_hold \\\n and g_dev['obs'].blocks is not None and g_dev['obs'].projects \\\n is not None:\n blocks = g_dev['obs'].blocks\n projects = g_dev['obs'].projects\n debug = False\n if self.config['site_roof_control'] != 'no' and enc_status['shutter_status'] in ['Closed', 'closed'] \\\n and float(ocn_status['hold_duration']) <= 0.1: #NB this blockes SR from running 20220826\n #breakpoint()\n g_dev['enc'].open_command({}, {})\n print(\"Opening dome, will set Synchronize in 10 seconds.\")\n time.sleep(10)\n g_dev['enc'].sync_mount_command({}, {})\n\n if debug:\n print(\"# of Blocks, projects: \", len(g_dev['obs'].blocks), len(g_dev['obs'].projects))\n\n #Note here we could evaluate projects to see which meet observability constraints and place them\n #In an observables list, then we could pick one to start. IF there is no pre-sheduled observing block\n #it would just run. Voila an Opportunistic scheduler. An observing block may be empty or point to\n #a project and if the project is runnable any way, it runs or is marked completed.\n # NB without deepcopy decrementing counts in blocks will be local to the machine an subject\n # to over_write as the respons from AWS updates. 
This is particularly important for owner\n # and background blocks.\n\n #First, sort blocks to be in ascending order, just to promote clarity. Remove expired projects.\n for block in blocks: # This merges project spec into the blocks.\n for project in projects:\n\n try:\n if block['project_id'] == project['project_name'] + '#' + project['created_at']:\n block['project'] = project\n except:\n block['project'] = None #nb nb nb 20220920 this faults with 'string indices must be integers\". WER\n\n #print('Scheduled so removing: ', project['project_name'])\n #projects.remove(project)\n\n #The residual in projects can be treated as background.\n #print('Background: ', len(projects), '\\n\\n', projects)\n\n\n house = []\n for project in projects:\n if block['project_id'] != 'none':\n try:\n\n if block['project_id'] == project['project_name'] + '#' + project['created_at']:\n block['project'] = project\n except:\n block['project'] = None\n else:\n pass\n #print(\"Reservation asserting at this time. \", )\n '''\n evaluate supplied projects for observable and mark as same. Discard\n unobservable projects. Projects may be \"site\" projects or 'ptr' (network wide:\n All, Owner, PTR-network, North, South.)\n The westernmost project is offered to run unless there is a runnable scheduled block.\n for any given time, are the constraints met? Airmass < x, Moon Phaze < y, moon dist > z,\n flip rules\n\n '''\n # breakpoint()\n # #Figure out which are observable. Currently only supports one target/proj\n # NB Observing events without a project are \"observable.\"\n # observable = []\n # for projects in projects:\n # ra = projects['project_targets']['ra']\n # dec = projects['project_targets']['dec']\n # sid = g_dev['mnt'].mount.SiderealTime\n # ha = tycho.reduceHA(sid - ra)\n # az, alt = transform_haDec_to_azAlt(ha, dec)\n # # Do not start a block within 15 min of end time???\n #print(\"Initial length: \", len(blocks))\n for block in blocks:\n now_date_timeZ = datetime.datetime.now().isoformat().split('.')[0] +'Z'\n if not self.block_guard \\\n and (block['start'] <= now_date_timeZ < block['end']) \\\n and not self.is_in_completes(block['event_id']):\n if block['project_id'] in ['none', 'real_time_slot', 'real_time_block']:\n self.block_guard = True\n return # Do not try to execute an empty block.\n self.block_guard = True\n\n completed_block = self.execute_block(block) #In this we need to ultimately watch for weather holds.\n self.append_completes(completed_block['event_id'])\n #block['project_id'] in ['none', 'real_time_slot', 'real_time_block']\n '''\n When a scheduled block is completed it is not re-entered or the block needs to\n be restored. 
In the execute block we need to make a deepcopy of the input block\n                        so it does not get modified.\n                        '''\n                        #print('block list exhausted')\n                        #return Commented out 20220409 WER\n\n\n                # print(\"Here we would enter an observing block: \",\n                #       block)\n                # breakpoint()\n                #OK here we go to a generalized block execution routine that runs\n                #until exhaustion of the observing window.\n                # else:\n                #     pass\n                    #print(\"Block tested for observability\")\n\n        # #System hangs on this state\n        # elif ((g_dev['events']['Observing Ends'] < ephem_now < g_dev['events']['End Morn Sky Flats']) and \\\n        #     g_dev['enc'].mode == 'Automatic') and not g_dev['ocn'].wx_hold and self.config['auto_morn_sky_flat']:\n        #     self.enc_to_skyflat_and_open(enc_status, ocn_status)\n        #     #*********NB NB system hangs here\n        elif self.morn_sky_flat_latch and ((events['Morn Sky Flats'] <= ephem_now < events['End Morn Sky Flats']) \\\n            and g_dev['enc'].mode == 'Automatic' and not g_dev['ocn'].wx_hold and \\\n            self.config['auto_morn_sky_flat']):\n            self.enc_to_skyflat_and_open(enc_status, ocn_status) #Just in case a Wx hold stopped opening\n            self.current_script = \"Morn Sky Flat script starting\"\n            #self.morn_sky_flat_latch = False\n            #print('Skipping Eve Sky Flats')\n            self.sky_flat_script({}, {}, morn=True) #Null command dictionaries\n            self.morn_sky_flat_latch = False\n            #self.park_and_close(enc_status)\n        elif self.morn_bias_dark_latch and ((events['Morn Bias Dark'] <= ephem_now < events['End Morn Bias Dark']) and \\\n            self.config['auto_morn_bias_dark'] and g_dev['enc'].mode == 'Automatic' ):\n            #breakpoint()\n            self.morn_bias_dark_latch = False\n            req = {'bin1': False, 'bin2': True, 'bin3': False, 'bin4': False, 'numOfBias': 45, \\\n                   'numOfDark': 15, 'darkTime': 180, 'numOfDark2': 3, 'dark2Time': 360, \\\n                   'hotMap': True, 'coldMap': True, 'script': 'genBiasDarkMaster', }\n            opt = {}\n            #No action needed on the enclosure at this level\n            self.park_and_close(enc_status)\n            #NB The above closed the dome and parked the telescope, which is where it should have been upon entry.\n            self.bias_dark_script(req, opt, morn=True)\n            self.morn_bias_dark_latch = False\n            self.park_and_close(enc_status)\n        else:\n            self.current_script = \"No current script, or site not in Automatic.\"\n            try:\n                pass\n                #self.park_and_close(enc_status)\n            except:\n                print(\"Park and close failed at end of sequencer loop.\")\n        return\n    def take_lrgb_stack(self, req_None, opt=None):\n        return\n    def take_wugriz_stack(self, req_None, opt=None):\n        return\n    def take_UBRI_stack(self, req_None, opt=None):\n        return\n    def take_RGB_stack(self, req_None, opt=None):\n        return\n    def create_OSC_raw_image(self, req_None, opt=None):\n        return\n#        self.redis_server.set('sim_hold', True, ex=120)\n\n    def clock_the_system(self, other_side=False):\n        '''\n\n        This routine carefully starts up the telescope and verifies the telescope is\n        properly reporting correct coordinates and the dome is positioning correctly.\n        Once a star field is returned, the system solves and synchs the telescope and\n        dome if necessary. Next a detailed autofocus is performed on a Tycho star of\n        known mag and position. The final reading from the autofocus is used for one\n        last clocking.\n\n        other_side = True causes the telescope to then flip and repeat the process.\n        From differences in the solutions, flip_shift offsets can be calculated.\n\n        If this routine does not solve, the night is potentially lost so an alert\n        message should be sent to the owner and telops, the enclosure closed and\n        left in manual, the telescope parked and instruments are put to bed.\n\n        This routine is designed to begin when the altitude of the Sun is -9 degrees.\n        The target azimuth will change so the Moon is always 15 or more degrees away.\n\n        If called in the Morning and the routine fails, the system is still put to\n        bed but a less urgent message is sent to the owner and telops.\n\n        Returns\n        -------\n        None.\n\n        '''\n\n        '''\n        if dome is closed: simulate\n        if not simulate, check sun is down\n        check dome is open\n\n        go to 90 az 60 alt then near tycho star\n        Image and look for stars (or load simulated frames)\n\n        If stars not present:\n            slew dome right-left increasing to find stars\n            if +/- 90 az change in dome does not work then\n            things are very wrong -- close down and email list.\n\n        if stars present, then autofocus with wide tolerance\n        if after 5 tries no luck -- close down and email list.\n\n        if good autofocus then last frame is the check frame.\n\n        Try to astrometrically solve it. if it solves, synch the\n        telescope. Wait for dome to get in position and\n\n        Take second image, solve and synch again.\n\n        If tel motion > 1 amin, do one last time.\n\n        Look at dome Az -- is dome following the telescope?\n        Report if necessary\n\n        return control.\n\n\n\n\n\n\n        '''\n\n    def execute_block(self, block_specification):\n        #ocn_status = eval(self.redis_server.get('ocn_status'))\n        #enc_status = eval(self.redis_server.get('enc_status'))\n        self.block_guard = True\n        # NB we assume the dome is open and already slaving.\n        block = copy.deepcopy(block_specification)\n        ocn_status = g_dev['ocn'].status\n        enc_status = g_dev['enc'].status\n        # #unpark, open dome etc.\n        # #if not end of block\n        # if not enc_status in ['open', 'Open', 'opening', 'Opening']:\n        #     self.enc_to_skyflat_and_open(enc_status, ocn_status, no_sky=True) #Just in case a Wx hold stopped opening\n        # else:\n        #g_dev['enc'].sync_mount_command({}, {})\n        g_dev['mnt'].unpark_command({}, {})\n        g_dev['mnt'].Tracking = True # unpark_command({}, {})\n        g_dev['cam'].user_name = 'tobor'\n        g_dev['cam'].user_id = 'tobor'\n        #NB Servo the Dome??\n        timer = time.time() - 1 #This should force an immediate autofocus.\n        req2 = {'target': 'near_tycho_star', 'area': 150}\n        opt = {}\n        t = 0\n        '''\n        # to do is Targets*Mosaic*(sum of filters * count)\n\n        Assume for now we only have one target and no mosaic factor.\n        The first thing to do is figure out how many exposures\n        in the series. If enhance AF is true they need to be injected\n        at some point, but it does not decrement. This is still left to do\n\n\n        '''\n        # if block['project'] is None:\n        #user controlled block...\n        #NB NB NB if no project found, need to say so not fault. 
20210624\n        #breakpoint()\n\n\n        # Remove None entries before running through list\n        #print (block['project']['project_targets'])\n        for target in block['project']['project_targets']: # NB NB NB Do multi-target projects make sense???\n\n            try:\n                dest_ra = float(target['ra']) - \\\n                    float(block_specification['project']['project_constraints']['ra_offset'])/15.\n                dest_dec = float(target['dec']) - float(block_specification['project']['project_constraints']['dec_offset'])\n                dest_ra, dest_dec = ra_dec_fix_hd(dest_ra,dest_dec)\n                dest_name =target['name']\n            except:\n                print (\"Could not execute project due to poorly formatted or corrupt RA or Dec in project_targets\")\n                g_dev['obs'].send_to_user(\"Could not execute project due to poorly formatted or corrupt RA or Dec in project_targets\", p_level='INFO')\n                continue\n\n            if enc_status['shutter_status'] in ['Closed', 'closed'] and ocn_status['hold_duration'] <= 0.1: #NB # \\ NB NB 20220901 WER fix this!\n\n                #breakpoint()\n                g_dev['enc'].open_command({}, {})\n                print(\"Opening dome, will set Synchronize in 10 seconds.\")\n                time.sleep(10)\n                g_dev['enc'].sync_mount_command({}, {})\n\n            '''\n            We are starting a block:\n                Open dome if alt Sun < 5 degrees\n                Unpark telescope\n                Slave the Dome\n                Go to Az of the target and take a 15 second W Square\n                exposure -- better go to a tycho star near\n                the aimpoint at Alt ~30-35  Take an exposure, try to solve\n                and possibly synch.  But be above any horizon\n                effects.\n\n                Then autofocus, then finally go to the object\n                which could be below Alt of 30.\n                All of the above for first of night, then at the start of a block\n                do the square target check, then AF, then block, depending\n                on the AF more Frequently setting.\n\n                Consider a target check and even a synch after a flip.\n\n\n            '''\n            try:\n                g_dev['mnt'].get_mount_coordinates()\n            except:\n                pass\n            g_dev['mnt'].go_coord(dest_ra, dest_dec)\n            #self.redis_server.set('sync_enc', True, ex=1200) #Should be redundant\n            print(\"CAUTION: rotator may block\")\n            pa = float(block_specification['project']['project_constraints']['position_angle'])\n            if abs(pa) > 0.01:\n                try:\n\n                    g_dev['rot'].rotator.MoveAbsolute(pa) #Skip rotator move if nominally 0\n                except:\n                    pass\n\n\n            #Compute how many to do.\n            left_to_do = 0\n            ended = False\n            # NB NB NB Any mosaic larger than +SQ should be specified in degrees and be square\n            # NB NB NB NB this is the source of a big error$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$!!!! 
WER 20220814\n for exposure in block['project']['exposures']:\n multiplex = 0\n if exposure['area'] in ['300', '300%', 300, '220', '220%', 220, '150', '150%', 150, '250', '250%', 250]:\n if block_specification['project']['project_constraints']['add_center_to_mosaic']:\n multiplex = 5\n else:\n multiplex = 4\n if exposure['area'] in ['600', '600%', 600, '450', '450%', 450]:\n multiplex = 16\n if exposure['area'] in ['500', '500%', 500]:\n if block_specification['project']['project_constraints']['add_center_to_mosaic']:\n multiplex = 7\n else:\n multiplex = 6\n if exposure['area'] in ['+SQ', '133%']:\n multiplex = 2\n if multiplex > 1:\n left_to_do += int(exposure['count'])*multiplex\n exposure['count'] = int(exposure['count'])*multiplex #Do not multiply the count string value as a dict entry!\n print('# of mosaic panes: ', multiplex)\n else:\n left_to_do += int(exposure['count'])\n print('Singleton image')\n\n print(\"Left to do initial value: \", left_to_do)\n req = {'target': 'near_tycho_star'}\n initial_focus = True\n af_delay = 45*60 #This must be a big number!\n\n while left_to_do > 0 and not ended:\n\n #just_focused = True ###DEBUG\n if initial_focus: # and False:\n #print(\"Enc Status: \", g_dev['enc'].get_status())\n\n\n # if not g_dev['enc'].shutter_is_closed:\n self.auto_focus_script(req2, opt, throw = 600)\n # pass\n # else:\n # print('Shutter closed, skipping AF cycle.0') #coarse_focus_script can be used here\n just_focused = True\n initial_focus = False # Make above on-time event per block\n timer = time.time() + af_delay # 45 minutes\n #at block startup this should mean two AF cycles. Cosider using 5-point for the first.\n\n #cycle through exposures decrementing counts MAY want to double check left-to do but do nut remultiply by 4\n for exposure in block['project']['exposures']:\n # if block_specification['project']['project_constraints']['frequent_autofocus'] == True and (time.time() - timer) >= 0:\n # #What purpose does this code serve, it appears to be a debug remnant? WER 20200206\n # if not g_dev['enc'].shutter_is_closed:\n # self.auto_focus_script(req2, opt, throw = 500) # Should need less throw.\n # else:\n # print('Shutter closed, skipping AF cycle.0')\n initial_focus = False\n just_focused = True\n timer = time.time() + af_delay #40 minutes to refocus\n #print (block['project']['project_name'])\n # MTF - Allocate the project target name to object name\n print (\"Observing \" + str(block['project']['project_targets'][0]['name']))\n #opt['object_name']=block['project']['project_targets'][0]['name']\n print(\"Executing: \", exposure, left_to_do)\n color = exposure['filter']\n exp_time = float(exposure['exposure'])\n #dither = exposure['dither']\n if exposure['bin'] in[0, '0', '0,0', '0 0']:\n #if g_dev['cam'].config['camera']['camera_1_1']['settings']['default_bin'][0] == 1: # WER\n # binning = '1 1'\n tempBinString=str(g_dev['cam'].config['camera']['camera_1_1']['settings']['default_bin'][0])\n binning = tempBinString + ' ' + tempBinString\n elif exposure['bin'] in [2, '2,2', '2, 2', '2 2']:\n binning = '2 2'\n elif exposure['bin'] in [3, '3,3', '3, 3', '3 3']:\n binning = '3 3'\n elif exposure['bin'] in [4, '4,4', '4, 4', '4 4']:\n binning = '4 4'\n else:\n binning = '1 1'\n count = int(exposure['count'])\n # We should add a frame repeat count\n imtype = exposure['imtype']\n #defocus = exposure['defocus']\n# if g_dev['site'] == 'saf': #THis should be in config.\n # if color[0] == 'B':\n # color = 'PB' #Map generic filters to site specific ones. 
NB this does no tbelong here, it should be central with Cameras setup.\n # if color[0] == 'G':\n # color = 'PG' # NB NB THis needs a clean up, these mappings should be in config\n # if color[0] == 'R':\n # color = 'PR'\n # if color[0] == 'L':\n # color = 'PL'\n # if color[0] == 'W':\n # color = 'w'\n # if color[0] == 'g':\n # color = 'gp'\n # if color[0] == 'r': #NB This is redundant for Sloans when small cap.\n # color = 'rp'\n # if color[0] == 'i': #NB NB THIS IS WRONG For Johnson and Bessell\n # color = 'ip'\n # if color[0] == 'H':\n # color = 'HA'\n # if color[0] == 'O':\n # color = 'O3'\n # if color[0] == 'S':\n # color = 'S2'\n # if color[0] == 'C':\n # color = 'CR'\n if count <= 0:\n continue\n #At this point we have 1 to 9 exposures to make in this filter. Note different areas can be defined.\n if exposure['area'] in ['300', '300%', 300, '220', '220%', 220, '150', '150%', 150, ]: # 4 or 5 expsoures.\n if block_specification['project']['project_constraints']['add_center_to_mosaic']:\n offset = [(0.0, 0.0), (-1.5, 1.), (1.5, 1.), (1.5, -1.), (-1.5, -1.)] #Aimpoint + Four mosaic quadrants 36 x 24mm chip\n pane = 0\n else:\n offset = [(-1, 1.), (1, 1.), (1, -1.), (-1, -1.)] #Four mosaic quadrants 36 x 24mm chip\n pane = 1\n #Exact details of the expansions need to be calculated for accurate naming. 20201215 WER\n if exposure['area'] in ['300', '300%', 300]:\n pitch = 0.3125\n if exposure['area'] in ['220', '220%', 220]:\n pitch = 0.25\n if exposure['area'] in ['150', '150%', 150]:\n pitch = 0.1875\n\n elif exposure['area'] in ['600', '600%', '4x4d', '4x4']:\n offset = [(0,0), (-1, 0), (-1, 0.9), (-1, 1.8), (0, 1.8), (1, 1.8), (2, 0.9), (1, 0.9), (0, 0.9), \\\n (2, 0), (1, 0), (1, -0.9), (0, -0.9), (-1, -0.9), (-1, -1.8), (0, -1.8), (1, -1.8)]\n #((2, -1,8), (2, -0.9), (2, 1.8)) # Dead areas for star fill-in.\n pitch = -1 #A signal to do something special. ##'600', '600%', 600,\n elif exposure['area'] in ['2x2', '500%']:\n offset= [(0,0), (-0.5, 0), (-0.5, .35), (0.5, 0.35), (0.5, 0), (-0.5, -0.35), (0.5, -0.35), ]\n pitch = 1\n elif exposure['area'] in ['450', '450%', 450]:\n pitch = 0.250\n pane = 0\n # elif exposure['area'] in ['500', '500%',]: # 6 or 7 exposures. SQUARE\n # step = 1.466667\n # if block_specification['project']['project_constraints']['add_center_to_mosaic']:\n # offset = [(0., 0.), (-1, 0.), (-1, step), (1, step), (1, 0), \\\n # (1, -step), (-1, -step)] #Aimpoint + six mosaic quadrants 36 x 24mm chip\n # pane = 0\n # else:\n # offset = [(-1, 0.), (-1, step), (1, step), (1, 0), \\\n # (1, -step), (-1, -step)] #Six mosaic quadrants 36 x 24mm chip\n # pane = 1\n # pitch = .375\n elif exposure['area'] in ['+SQ', '133%']: # 2 exposures. SQUARE\n step = 1\n offset = [(0, -1), (0, 1)] #Two mosaic steps 36 x 24mm chip Square\n pane = 1\n pitch = 0.25#*2 #Try this out for small overlap and tall field. 20220218 04:12 WER\n else:\n offset = [(0., 0.)] #Zero(no) mosaic offset\n pitch = 0.\n pane = 0\n for displacement in offset:\n\n x_field_deg = g_dev['cam'].config['camera']['camera_1_1']['settings']['x_field_deg']\n y_field_deg = g_dev['cam'].config['camera']['camera_1_1']['settings']['y_field_deg']\n if pitch == -1:\n #Note positive offset means a negative displacement in RA for spiral to wrap CCW.\n #Note offsets are absolute degrees.\n d_ra = -displacement[0]/15.\n d_dec = displacement[1]\n else:\n d_ra = displacement[0]*(pitch)*(x_field_deg/15.) 
# 0.764243 deg = 0.0509496 Hours These and pixscale should be computed in config.\n d_dec = displacement[1]*( pitch)*(y_field_deg) # = 0.5102414999999999 #Deg\n new_ra = dest_ra + d_ra\n new_dec= dest_dec + d_dec\n new_ra, new_dec = ra_dec_fix_hd(new_ra, new_dec)\n # offset = 44514.552766203706\n # moon_time = ephem.now() - offset + 78/1440\n # moon_ra = 0.787166667*moon_time + 1.0219444 + 0.01 - t*0.00025\n # moon_dec = 8.3001964784*math.pow(moon_time, 0.6719299333) - 0.125 + t*0.002\n # new_ra = moon_ra\n # new_dec = moon_dec\n print('Seeking to: ', new_ra, new_dec)\n g_dev['mnt'].go_coord(new_ra, new_dec) # This needs full angle checks\n if not just_focused:\n g_dev['foc'].adjust_focus()\n just_focused = False\n if imtype in ['light'] and count > 0:\n req = {'time': exp_time, 'alias': str(self.config['camera']['camera_1_1']['name']), 'image_type': imtype} # NB Should pick up filter and constants from config\n opt = {'area': 150, 'count': 1, 'bin': binning, 'filter': color, \\\n 'hint': block['project_id'] + \"##\" + dest_name, 'object_name': block['project']['project_targets'][0]['name'], 'pane': pane}\n print('Seq Blk sent to camera: ', req, opt)\n obs_win_begin, sunZ88Op, sunZ88Cl, ephem_now = self.astro_events.getSunEvents()\n\n now_date_timeZ = datetime.datetime.now().isoformat().split('.')[0] +'Z'\n if now_date_timeZ >= block['end'] :\n break\n result = g_dev['cam'].expose_command(req, opt, no_AWS=False, solve_it=False)\n try:\n if result['stopped'] is True:\n g_dev['obs'].send_to_user(\"Project Stopped because Exposure cancelled\")\n return block_specification\n except:\n pass\n t +=1\n count -= 1\n exposure['count'] = count\n left_to_do -= 1\n print(\"Left to do: \", left_to_do)\n # offset = 44514.552766203706\n # moon_time = ephem.now() - offset + 78/1440\n # moon_ra = 0.787166667*moon_time + 1.0219444 + 0.01 + t*0.0001\n # moon_dec = 8.3001964784*math.pow(moon_time, 0.6719299333) - 0.125 - t*0.01\n # new_ra = moon_ra\n # new_dec = moon_dec\n pane += 1\n\n now_date_timeZ = datetime.datetime.now().isoformat().split('.')[0] +'Z'\n ephem_now = ephem.now()\n events = g_dev['events']\n\n ended = left_to_do <= 0 or now_date_timeZ >= block['end'] \\\n or ephem.now() >= events['Observing Ends']\n # ]\\\n # or g_dev['airmass'] > float( block_specification['project']['project_constraints']['max_airmass']) \\\n # or abs(g_dev['ha']) > float(block_specification['project']['project_constraints']['max_ha'])\n # # Or mount has flipped, too low, too bright, entering zenith..\n\n print(\"Project block has finished!\") #NB Should we consider turning off mount tracking?\n if block_specification['project']['project_constraints']['close_on_block_completion']:\n #g_dev['mnt'].park_command({}, {})\n # NB NBNeed to write a more robust and generalized clean up.\n try:\n pass#g_dev['enc'].enclosure.Slaved = False NB with wema no longer exists\n except:\n pass\n #self.redis_server.set('unsync_enc', True, ex=1200)\n #g_dev['enc'].close_command({}, {})\n g_dev['mnt'].park_command({}, {})\n\n print(\"Auto PARK (not Close) attempted at end of block.\")\n self.block_guard = False\n return block_specification #used to flush the queue as it completes.\n\n\n def bias_dark_script(self, req=None, opt=None, morn=False):\n \"\"\"\n\n 20200618 This has been drastically simplied for now to deal with only QHY600M.\n\n May still have a bug where it latches up only outputting 2x2 frames.\n\n \"\"\"\n\n self.sequencer_hold = True\n self.current_script = 'Bias Dark'\n if morn:\n ending = g_dev['events']['End Morn Bias 
Dark']\n else:\n ending = g_dev['events']['End Eve Bias Dark']\n while ephem.now() < ending : #Do not overrun the window end\n\n g_dev['mnt'].park_command({}, {}) # Get there early\n\n print(\"Expose Biases: by configured binning; normal and long darks.\")\n\n # 'bin_enable': ['1 1'],\n # 'ref_dak': 360.0,\n # 'long_dark': 600.0,\n dark_time = self.config['camera']['camera_1_1']['settings']['ref_dark']\n long_dark_time = self.config['camera']['camera_1_1']['settings']['long_dark']\n\n\n for bias in range(9): #9*(9 +1) per cycle.\n if ephem.now() + 210/86400 > ending:\n break\n if \"1 1\" in self.config['camera']['camera_1_1']['settings']['bin_enable']:\n req = {'time': 0.0, 'script': 'True', 'image_type': 'bias'}\n opt = {'area': \"Full\", 'count': 9, 'bin':'1 1', \\\n 'filter': 'dark'}\n print(\"Expose b_1\")\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n do_sep=False, quick=False)\n g_dev['obs'].update_status()\n dark_time = 360\n if ephem.now() + (dark_time + 30)/86400 > ending:\n break\n print(\"Expose ref_dark using exposure: \", dark_time )\n req = {'time':dark_time , 'script': 'True', 'image_type': 'dark'}\n opt = {'area': \"Full\", 'count':1, 'bin': '1 1', \\\n 'filter': 'dark'}\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n do_sep=False, quick=False)\n\n g_dev['obs'].update_status()\n if long_dark_time is not None and long_dark_time > dark_time:\n\n if ephem.now() + (long_dark_time + 30)/86400 > ending:\n break\n print(\"Expose long dark using exposure: \", long_dark_time)\n req = {'time':long_dark_time , 'script': 'True', 'image_type': 'dark'}\n opt = {'area': \"Full\", 'count':1, 'bin': '1 1', \\\n 'filter': 'dark'}\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n do_sep=False, quick=False)\n\n g_dev['obs'].update_status()\n if \"2 2\" in self.config['camera']['camera_1_1']['settings']['bin_enable']:\n req = {'time': 0.0, 'script': 'True', 'image_type': 'bias'}\n opt = {'area': \"Full\", 'count': 9, 'bin':'2 2', \\\n 'filter': 'dark'}\n print(\"Expose b_1\")\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n do_sep=False, quick=False)\n g_dev['obs'].update_status()\n dark_time = 360\n if ephem.now() + (dark_time + 30)/86400 > ending:\n break\n print(\"Expose ref_dark using exposure: \", dark_time )\n req = {'time':dark_time , 'script': 'True', 'image_type': 'dark'}\n opt = {'area': \"Full\", 'count':1, 'bin': '2 2', \\\n 'filter': 'dark'}\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n do_sep=False, quick=False)\n\n g_dev['obs'].update_status()\n if long_dark_time is not None and long_dark_time > dark_time:\n\n if ephem.now() + (long_dark_time + 30)/86400 > ending:\n break\n print(\"Expose long dark using exposure: \", long_dark_time)\n req = {'time':long_dark_time , 'script': 'True', 'image_type': 'dark'}\n opt = {'area': \"Full\", 'count':1, 'bin': '2 2', \\\n 'filter': 'dark'}\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n do_sep=False, quick=False)\n\n g_dev['obs'].update_status()\n # if ephem.now() + 210/86400 > ending:\n # break\n # print(\"Expose Biases: b_2\")\n # #dark_time =600\n # #for bias in range(9):\n # req = {'time': 0.0, 'script': 'True', 'image_type': 'bias'}\n # opt = {'area': \"Full\", 'count': 7, 'bin': '2 2', \\\n # 'filter': 'dark'}\n # result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n # do_sep=False, quick=False)\n\n # g_dev['obs'].update_status()\n # dark_time = 300\n # if ephem.now() >= (dark_time + 30)/86400 > ending:\n # 
break\n # print(\"Expose d_2 using exposure: \", dark_time )\n # req = {'time':dark_time , 'script': 'True', 'image_type': 'dark'}\n # opt = {'area': \"Full\", 'count':1, 'bin': '2 2', \\\n # 'filter': 'dark'}\n # result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n # do_sep=False, quick=False)\n\n # g_dev['obs'].update_status()\n # if ephem.now() + 210/86400 > ending:\n # break\n\n # if self.config['site'] != 'mrc2': #NB Please implement in the site config not in-line.\n\n # print(\"Expose Biases: b_3\")\n # dark_time = 300\n # #for bias in range(9):\n # req = {'time': 0.0, 'script': 'True', 'image_type': 'bias'}\n # opt = {'area': \"Full\", 'count': 7, 'bin':'3 3', \\\n # 'filter': 'dark'}\n # result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n # do_sep=False, quick=False)\n # g_dev['obs'].update_status()\n # if ephem.now() >= (dark_time + 30)/86400 > ending:\n # break\n # print(\"Expose d_3 using exposure: \", dark_time )\n # req = {'time':dark_time, 'script': 'True', 'image_type': 'dark'}\n # opt = {'area': \"Full\", 'count':1, 'bin':'3 3', \\\n # 'filter': 'dark'}\n # result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n # do_sep=False, quick=False)\n # print('Last dark result: ', result)\n # g_dev['obs'].update_status()\n\n # if ephem.now() + 210/86400 > ending:\n # break\n # print(\"Expose Biases: b_4\")\n # dark_time = 240\n # #for bias in range(9):\n # req = {'time': 0.0, 'script': 'True', 'image_type': 'bias'}\n # opt = {'area': \"Full\", 'count': 7, 'bin':'4 4', \\\n # 'filter': 'dark'}\n # result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n # do_sep=False, quick=False)\n\n # g_dev['obs'].update_status()\n # if ephem.now() + (dark_time + 30)/86400 > ending:\n # break\n # print(\"Expose d_4 using exposure: \", dark_time )\n # req = {'time':dark_time , 'script': 'True', 'image_type': 'dark'}\n # opt = {'area': \"Full\", 'count':1, 'bin': '4 4', \\\n # 'filter': 'dark'}\n # result = g_dev['cam'].expose_command(req, opt, no_AWS=True, \\\n # do_sep=False, quick=False)\n\n g_dev['obs'].update_status()\n if ephem.now() + 30/86400 >= ending:\n break\n\n print(\" Bias/Dark acquisition is finished normally.\")\n\n\n\n self.sequencer_hold = False\n g_dev['mnt'].park_command({}, {}) # Get there early\n print(\"Bias/Dark Phase has passed.\")\n\n\n\n if morn:\n # UNDERTAKING END OF NIGHT ROUTINES\n\n print (\"sending end of night token to AWS\")\n #g_dev['cam'].enqueue_for_AWS(jpeg_data_size, paths['im_path'], paths['jpeg_name10'])\n yesterday = datetime.datetime.now() - timedelta(1)\n #print (datetime.datetime.strftime(yesterday, '%Y%m%d'))\n runNight=datetime.datetime.strftime(yesterday, '%Y%m%d')\n isExist = os.path.exists(g_dev['cam'].site_path + 'tokens')\n if not isExist:\n os.makedirs(g_dev['cam'].site_path + 'tokens')\n runNightToken= g_dev['cam'].site_path + 'tokens/' + self.config['site'] + runNight + '.token'\n with open(runNightToken, 'w') as f:\n f.write('Night Completed')\n g_dev['obs'].aws_queue.put((30000000, runNightToken), block=False)\n g_dev['obs'].send_to_user(\"End of Night Token sent to AWS.\", p_level='INFO')\n\n # Culling the archive\n FORTNIGHT=60*60*24*7*2\n #dir_path='D:/PTRMFO/'\n\n dir_path=self.config['client_path'] + '\\\\' + 'archive'\n cameras=[d for d in os.listdir(dir_path) if os.path.isdir(d)]\n for camera in cameras: # Go through each camera directory\n print (\"*****************************************\")\n print (\"Camera: \" + str(camera))\n timenow_cull=time.time()\n cameradir=dir_path + '\\\\' + camera 
+ '\\\\'\n directories=[d for d in os.listdir(cameradir) if os.path.isdir(d)]\n deleteDirectories=[]\n deleteTimes=[]\n for q in range(len(directories)):\n if ((timenow_cull)-os.path.getmtime(cameradir + directories[q])) > FORTNIGHT:\n deleteDirectories.append(directories[q])\n deleteTimes.append(((timenow_cull)-os.path.getmtime(cameradir +directories[q])) /60/60/24/7)\n\n\n\n print (\"These are the directories earmarked for \")\n print (\"Eternal destruction. And how old they are\")\n print (\"in weeks\\n\")\n g_dev['obs'].send_to_user(\"Culling \" + str(len(deleteDirectories)) +\" from the local archive.\", p_level='INFO')\n for entry in range(len(deleteDirectories)):\n print (deleteDirectories[entry] + ' ' + str(deleteTimes[entry]) + ' weeks old.')\n #shutil.rmtree(cameradir + deleteDirectories[entry]) # THIS IS THE DELETER WHEN WE ARE READY!\n\n # Reopening config\n imp.reload(config)\n self.config = config\n # Getting new times for the new day\n #self.astro_events = ptr_events.Events(self.config)\n self.astro_events.compute_day_directory()\n self.astro_events.display_events()\n # sending this up to AWS\n self.update_config()\n\n return\n\n\n\n def sky_flat_script(self, req, opt, morn=False):\n \"\"\"\n\n If entered, put up a guard.\n if open conditions are acceptable then take a dark image of a dark screen, just for\n reference.\n Open the dome,\n GoTo flat spot, expose, rotating through 3 filters pick least sensitive\n discard overexposures, keep rotating. once one of the three yeilds a good\n exposure, repeat four more times, then drop that filter from list, add a new one\n and proceed to loop. This should allow us to generate the sensitivity list in\n the right order and not fill the system up will overexposed files. Ultimatley\n we wait for the correct sky condition once we have the calibrations so as to not\n wear out the shutter.\n Non photometric shutters need longer exposure times.\n Note with alt-az mount we could get very near the zenith zone.\n Note we want Moon at least 30 degrees away\n\n 20220821 New try at this code\n Set up parameters for the site, camera, etc.\n set up 'end-time'. Calling into this happens elesewhere at the prescribed start time\n Pick forward or reverse filter list depemnding on Eve or Morn flats. -- \"the pop-list\"\n flat count = 3\n scale = 1, used to drive exposure to ~32500ADU That is the target_flat value\n prior scale = 1 When changing filters apply this scale so we do not wast time. 
This\n is intended to fix the problem the gain estimates are wrong.\n while len(pop_list) > 0 and ephem.now() < ending:\n Get the filter, its 'gain'\n go the the solar flat spot (Tel should be there earlier)\n possibly here if not on flat spot or roof not open:\n time.sleep(10)\n continue the loop\n\n (Note if SRO roof opens late we are likely behinf the 8-ball and we waste time\n on the Narrow Band filters.)\n calculate exposure (for S2 filter if Night, PL filter if morning.)\n if evening and exposure > 180 sec sky is too dark for that filter so:\n pop that filter\n flat count = 3\n continue the loop\n if morning and exposure < 1 sec then sky too bright for that filter so:\n pop tht tilter\n flat count = 3\n continue the loop\n\n Here I think we need another loop that gets the number of flats or pops\n the filter and then continues the above loop.\n Tries = 6 #basically prevent a spin on one filter from eating up the window.\n While flatcount > 0 and tries > 0 and ephem.now() < ending:\n Expose the filter for the computed time.\n Now lets fix the convoluted code.\n The central patch should ideally be ~= target flat, so\n scale = target_flat/patch, avoiding the obvious divide by zero. A problem\n here is if Patch is >> 65,000 we only scale exposure by about half. So it makes\n some sense to cut it down more so we converge faster. (Scaling up seems to work\n on the first pass.)\n\n if patch is say 30000 <= patch <= 35000, accept the exposure as a valid flat:\n flatcount -= 1\n tried =- 1\n scale = prior_scale*target_flat/patch #prior _scale is 1.0\n elif outside that range\n tried =- 1\n scale = prior_scale*target_flat/patch as adjusted by the above paragraph.\n\n Next step is a bit subtle. if the loop is going to fail because with the flat_count\n or tries are exceeded we need to set up prior_scale. The theory is if the session worked\n perfect we end with an effective scale on 1. But the sky fades very fast so to do this\n right we need somthing more like an average-scale. However for now, keep it simple.\n So the assumption is is the scale for the s2 filter to expose correctly is 0.9 then\n the S2 signal is \"bright\". So we put that factor into prior scale so when we move to HA\n the system will bias the first HA exposure assuming it will be bright for that band as well.\n\n What I have seen so far is there is variation night to night is the sky transmission in the\n red bands. Add that to the fast chages is skybrighness after SRO opens and ... challenging.\n\n Note in old code I try recomputing the \"gain\". Ideally a better way to do this would be to\n create a persisten gain list of say the last 7 successful nights per filter of course and then\n seed the above more accurately.\n\n Now once we get rid of CCD cameras this becomes a bit easier since min exposure can be 0.0001 sec.\n But readout time then starts to dominate. All fine you say but if we have a full wheel of filters\n then haveing only 35 or so minutes is still limiting.\n\n I am going to push this to Git right now so MFitz can comment. Then i will get back to the pseudo code.\n\n\n\n\n\n\n\n\n \"\"\"\n\n self.sky_guard = True #20220409 I think this is obsolete or unused.\n print('Sky Flat sequence Starting, Enclosure PRESUMED Open. Telescope should be on sky flat spot.')\n\n g_dev['obs'].send_to_user('Sky Flat sequence Starting, Enclosure PRESUMED Open. 
Telescope should be on sky flat spot.', p_level='INFO')\n        evening = not morn\n        camera_name = str(self.config['camera']['camera_1_1']['name'])\n        flat_count = 5\n        min_exposure = float(self.config['camera']['camera_1_1']['settings']['min_exposure'])\n        bin_spec = '1,1'\n        try:\n            bin_spec = self.config['camera']['camera_1_1']['settings']['flat_bin_spec']\n        except:\n            pass\n        exp_time = min_exposure # added 20220207 WER 0.2 sec for SRO\n\n\n        # Pick up the list of filters in sky-flat order, lowest to highest transparency.\n        pop_list = self.config['filter_wheel']['filter_wheel1']['settings']['filter_sky_sort'].copy()\n\n        if morn:\n            pop_list.reverse()\n            print('filters by high to low transmission: ', pop_list)\n            ending = g_dev['events']['End Morn Sky Flats']\n        else:\n            print('filters by low to high transmission: ', pop_list)\n            ending = g_dev['events']['End Eve Sky Flats']\n        #length = len(pop_list)\n        obs_win_begin, sunset, sunrise, ephem_now = self.astro_events.getSunEvents()\n        exp_time = 0\n        scale = 1.0\n        prior_scale = 1 #THIS will be inherited upon completion of the prior filter\n        collecting_area = self.config['telescope']['telescope1']['collecting_area']/31808. # SAF at F4.9 is the reference\n        # and (g_dev['events']['Eve Sky Flats'] <\n\n        while len(pop_list) > 0 and ephem.now() < ending:\n\n            current_filter = int(pop_list[0])\n            acquired_count = 0\n            #req = {'filter': current_filter}\n            #opt = {'filter': current_filter}\n\n            g_dev['fil'].set_number_command(current_filter) # 20220825 NB NB NB Change this to using a list of filter names.\n            g_dev['mnt'].slewToSkyFlatAsync()\n            target_flat = 30000\n            #scale = 1.0 #1.15 #20201121 adjustment\n\n\n\n            # if not g_dev['enc'].status['shutter_status'] in ['Open', 'open']:\n            #     g_dev['obs'].send_to_user(\"Wait for roof to be open to take skyflats. 60 sec delay loop.\", p_level='INFO')\n            #     time.sleep(60)\n            #     g_dev['obs'].update_status()\n            #     continue\n            while (acquired_count < flat_count):# and g_dev['enc'].status['shutter_status'] in ['Open', 'open']: # NB NB NB and roof is OPEN! and (ephem_now +3/1440) < g_dev['events']['End Eve Sky Flats' ]:\n                #if g_dev['enc'].is_dome: #Does not apply\n                g_dev['mnt'].slewToSkyFlatAsync() #Frequently do this to dither.\n                g_dev['obs'].update_status()\n\n                try:\n                    try:\n                        sky_lux = eval(self.redis_server.get('ocn_status'))['calc_HSI_lux'] #Why eval()? This should already be a float.\n                    except:\n                        #print(\"Redis not running. lux set to 1000.\")\n                        sky_lux = float(g_dev['ocn'].status['calc_HSI_lux'])\n\n                    exp_time = prior_scale*scale*target_flat/(collecting_area*sky_lux*float(g_dev['fil'].filter_data[current_filter][3])) #g_dev['ocn'].calc_HSI_lux) #meas_sky_lux)\n                    print('Ex: ', exp_time, scale, prior_scale, sky_lux, float(g_dev['fil'].filter_data[current_filter][3]))\n\n                    if evening and exp_time > 120:\n                        #exp_time = 60 #Live with this limit. Basically started too late\n                        print('Break because proposed evening exposure > 120 seconds: ', exp_time)\n                        g_dev['obs'].send_to_user('Try next filter because proposed flat exposure > 120 seconds.', p_level='INFO')\n                        pop_list.pop(0)\n                        break\n                    if morn and exp_time < min_exposure:\n                        #exp_time = 60 #Live with this limit. Basically started too late\n                        print('Break because proposed morning exposure < min_exposure: ', exp_time)\n                        g_dev['obs'].send_to_user('Try next filter because proposed flat exposure < min_exposure.', p_level='INFO')\n                        pop_list.pop(0)\n                        break\n                    if evening and exp_time < min_exposure: #NB it is too bright, should consider a delay here.\n                        #**************THIS SHOULD BE A WHILE LOOP, WAITING FOR THE SKY TO GET DARK AND EXP TIME TO GET LONGER********************\n                        print(\"Too bright, waiting 180 seconds.\")\n                        g_dev['obs'].send_to_user('Delay 180 seconds to let it get darker.', p_level='INFO')\n                        time.sleep(180)\n                    if morn and exp_time > 120: #NB it is too dim, should consider a delay here.\n                        #**************THIS SHOULD BE A WHILE LOOP, WAITING FOR THE SKY TO GET BRIGHTER AND EXP TIME TO GET SHORTER********************\n                        print(\"Too dim, waiting 180 seconds.\")\n                        g_dev['obs'].send_to_user('Delay 180 seconds to let it get lighter.', p_level='INFO')\n                        time.sleep(180)\n                    #*****************NB Recompute exposure or otherwise wait\n                    if exp_time < min_exposure:\n                        exp_time = min_exposure\n                    exp_time = round(exp_time, 5)\n                    # prior_scale = prior_scale*scale #Only update prior scale when changing filters\n                    print(\"Sky flat estimated exposure time, scale are: \", exp_time, scale)\n                except:\n                    exp_time = 0.3\n                req = {'time': float(exp_time), 'alias': camera_name, 'image_type': 'sky flat', 'script': 'On'}\n                opt = { 'count': 1, 'bin': bin_spec, 'area': 150, 'filter': g_dev['fil'].filter_data[current_filter][0]} #nb nb nb BIN CHANGED FROM 2,2 ON 20220618 wer\n                print(\"using: \", g_dev['fil'].filter_data[current_filter][0])\n                if ephem.now() >= ending:\n                    return\n                try:\n\n                    fred = g_dev['cam'].expose_command(req, opt, no_AWS=True, do_sep = False)\n\n                    bright = fred['patch'] # Patch should be circular and 20% of Chip area. ToDo project\n                    print('Returned: ', bright)\n                except:\n                    print(\"*****NO result returned***** Will need to restart Camera\") #NB NB NB drastic action is needed here.\n                    g_dev['obs'].update_status()\n                    continue\n                g_dev['obs'].update_status()\n                try:\n\n                    scale *= target_flat/bright #Note we are scaling the scale\n                    print(\"New scale is: \", scale)\n                    if scale > 5000:\n                        scale = 5000\n                    if scale < 0.01:\n                        scale = 0.01\n                except:\n                    scale = 1.0\n\n                print('\n\n', \"Patch/Bright: \", bright, g_dev['fil'].filter_data[current_filter][0], \\n                      'New Gain value: ', round(bright/(sky_lux*collecting_area*exp_time), 3), '\n\n')\n\n                obs_win_begin, sunset, sunrise, ephem_now = self.astro_events.getSunEvents()\n                # The following code looks like a debug patch gone rogue.\n\n                if bright > 35000 and (ephem.now() < ending): #NB should gate with end of skyflat window as well.\n                    for i in range(1):\n                        time.sleep(2) # brief wait for the sky to change. Maybe shorten for wide bands?\n                        g_dev['obs'].update_status()\n                else:\n                    acquired_count += 1\n                    if acquired_count == flat_count:\n                        pop_list.pop(0)\n                        print(\"SCALE USED *************************: \", scale)\n                        prior_scale = scale #Here is where we pre-scale the next filter. TEMPORARILY TAKE THIS OUT\n                        scale = 1\n\n                obs_win_begin, sunset, sunrise, ephem_now = self.astro_events.getSunEvents()\n                g_dev['obs'].update_status()\n            continue\n        if morn is False:\n            g_dev['mnt'].tracking = False # park_command({}, {}) # NB this is provisional, Ok when simulating\n            self.eve_sky_flat_latch = False\n        elif morn:\n            try:\n                g_dev['mnt'].park_command({}, {})\n            except:\n                print(\"Mount did not park at end of morning skyflats.\")\n            self.morn_sky_flat_latch = False\n
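        # Editor's sketch (comments only): the docstring above proposes seeding the first\n        # exposure from a persistent per-filter gain history rather than one night's estimate.\n        # A minimal shelve-based version might look like this (names are illustrative,\n        # not part of this module):\n        #     gains = shelve.open(g_dev['cam'].site_path + 'ptr_night_shelf/flat_gains')\n        #     history = gains.get(filter_name, [])[-7:]   # keep the last 7 good nights\n        #     history.append(bright/(sky_lux*collecting_area*exp_time))\n        #     gains[filter_name] = history\n        #     seed_gain = sum(history)/len(history)\n        #     gains.close()\n        print('\nSky flat complete, or too early. 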
Telescope Tracking is off.\\n')\n self.sky_guard = False\n\n\n def screen_flat_script(self, req, opt):\n if req['numFrames'] > 1:\n flat_count = req['numFrames']\n else:\n flat_count = 1 # A dedugging compromise\n\n # NB here we need to check cam at reasonable temp, or dwell until it is.\n\n camera_name = str(self.config['camera']['camera_1_1']['name'])\n dark_count = 1\n exp_time = 15\n if flat_count < 1: flat_count = 1\n g_dev['mnt'].park_command({}, {})\n # NB: g_dev['enc'].close\n g_dev['obs'].update_status()\n g_dev['scr'].set_screen_bright(0)\n g_dev['scr'].screen_dark()\n time.sleep(5)\n g_dev['obs'].update_status()\n #Here we need to switch off any IR or dome lighting.\n #Take a 10 s dark screen air flat to record ambient\n # Park Telescope\n req = {'time': exp_time, 'alias': camera_name, 'image_type': 'screen flat'}\n opt = {'area': 100, 'count': dark_count, 'filter': 'dark', 'hint': 'screen dark'} # air has highest throughput\n\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True)\n print('First dark 30-sec patch, filter = \"air\": ', result['patch'])\n # g_dev['scr'].screen_light_on()\n\n for filt in g_dev['fil'].filter_screen_sort:\n #enter with screen dark\n filter_number = int(filt)\n print(filter_number, g_dev['fil'].filter_data[filter_number][0])\n screen_setting = g_dev['fil'].filter_data[filter_number][4][1]\n g_dev['scr'].set_screen_bright(0)\n g_dev['scr'].screen_dark()\n time.sleep(5)\n exp_time = g_dev['fil'].filter_data[filter_number][4][0]\n g_dev['obs'].update_status()\n print('Dark Screen; filter, bright: ', filter_number, 0)\n req = {'time': float(exp_time), 'alias': camera_name, 'image_type': 'screen flat'}\n opt = {'area': 100, 'count': 1, 'filter': g_dev['fil'].filter_data[filter_number][0], 'hint': 'screen pre-filter dark'}\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True)\n print(\"Dark Screen flat, starting: \", result['patch'], g_dev['fil'].filter_data[filter_number][0], '\\n\\n')\n g_dev['obs'].update_status()\n print('Lighted Screen; filter, bright: ', filter_number, screen_setting)\n g_dev['scr'].set_screen_bright(int(screen_setting))\n g_dev['scr'].screen_light_on()\n time.sleep(10)\n # g_dev['obs'].update_status()\n # time.sleep(10)\n # g_dev['obs'].update_status()\n # time.sleep(10)\n g_dev['obs'].update_status()\n req = {'time': float(exp_time), 'alias': camera_name, 'image_type': 'screen flat'}\n opt = {'area': 100, 'count': flat_count, 'filter': g_dev['fil'].filter_data[filter_number][0], 'hint': 'screen filter light'}\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True)\n # if no exposure, wait 10 sec\n print(\"Lighted Screen flat: \", result['patch'], g_dev['fil'].filter_data[filter_number][0], '\\n\\n')\n g_dev['obs'].update_status()\n g_dev['scr'].set_screen_bright(0)\n g_dev['scr'].screen_dark()\n time.sleep(5)\n g_dev['obs'].update_status()\n print('Dark Screen; filter, bright: ', filter_number, 0)\n req = {'time': float(exp_time), 'alias': camera_name, 'image_type': 'screen flat'}\n opt = {'area': 100, 'count': 1, 'filter': g_dev['fil'].filter_data[filter_number][0], 'hint': 'screen post-filter dark'}\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True)\n print(\"Dark Screen flat, ending: \",result['patch'], g_dev['fil'].filter_data[filter_number][0], '\\n\\n')\n\n\n #breakpoint()\n g_dev['scr'].set_screen_bright(0)\n g_dev['scr'].screen_dark()\n g_dev['obs'].update_status()\n g_dev['mnt'].Tracking = False #park_command({}, {})\n print('Sky Flat sequence completed, Telescope tracking is off.')\n 
self.guard = False\n\n\n    def auto_focus_script(self, req, opt, throw=600):\n        '''\n        V curve is a big-move focus run designed to fit two lines adjacent to the more normal focus curve.\n        It finds the approximate focus, particularly for a new instrument. It requires 8 points plus\n        a verify.\n        Auto focus consists of three points plus a verify.\n        Fine focus consists of five points plus a verify.\n        Optionally individual images can be multiples of one to average out seeing.\n        NB NB NB This code needs to go to known stars to be more reliable and permit subframes.\n\n        Result format:\n        result['mean_focus'] = avg_foc[1]\n        result['mean_rotation'] = avg_rot[1]\n        result['FWHM'] = spot          What is returned is a close proxy to the real fitted FWHM.\n        result['half_FD'] = None\n        result['patch'] = cal_result\n        result['temperature'] = avg_foc[2]          This is probably tube temperature, not reported by Gemini.\n
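\n        The three (or four) sampled (focus position, FWHM) pairs below are fed\n        to fit_quadratic() and the vertex of the fitted parabola is adopted as\n        best focus. A minimal sketch of one standard least-squares version\n        (editor's addition; the package's own fit_quadratic may differ in detail):\n\n            import numpy as np\n\n            def fit_quadratic(x, y):\n                # least-squares parabola y = a*x**2 + b*x + c; the vertex\n                # -b/(2a) is the position minimising the fitted FWHM\n                a, b, c = np.polyfit(x, y, 2)\n                return a, b, c, -b/(2.0*a)\n\n            a1, b1, c1, d1 = fit_quadratic(x, y)          # x: positions, y: FWHMs\n            new_spot = round(a1*d1*d1 + b1*d1 + c1, 2)    # predicted FWHM at d1\n        '''\n        #if self.config['site'] in ['sro']: #NB this should be a site config key in the focuser or computed from f-ratio.\n        #    throw = 250\n        #if self.config['site'] in ['saf']: # NB NB f4.9 this belongs in config, not in the code body!!!!\n        #    throw = 400\n        throw = g_dev['foc'].throw\n        self.sequencer_hold = False #Allow command checks.\n        self.guard = False\n        self.af_guard = True\n\n        req2 = copy.deepcopy(req)\n        opt2 = copy.deepcopy(opt)\n\n        sim = False # g_dev['enc'].status['shutter_status'] in ['Closed', 'Closing', 'closed', 'closing']\n\n        # try:\n        #     self.redis_server.set('enc_cmd', 'sync_enc', ex=1200)\n        #     self.redis_server.set('enc_cmd', 'open', ex=1200)\n        # except:\n        #     pass\n        #print('AF entered with: ', req, opt, '\n .. and sim = ', sim)\n        #self.sequencer_hold = True #Blocks command checks.\n        #Here we jump in too fast and need the mount to settle.\n\n        try:\n            #Check here for filter, guider, still moving. THIS IS A CLASSIC\n            #case where a timeout is a smart idea.\n            #Wait for external motion to cease before exposing. Note this precludes satellite tracking.\n            st = \"\"\n\n            #20210817 g_dev['enc'] does not exist, so this faults. Cascade problem with user_id...\n            while g_dev['foc'].focuser.IsMoving or g_dev['rot'].rotator.IsMoving or \\n                g_dev['mnt'].mount.Slewing: #or g_dev['enc'].status['dome_slewing']: #Filter is moving??\n                if g_dev['foc'].focuser.IsMoving: st += 'f>'\n                if g_dev['rot'].rotator.IsMoving: st += 'r>'\n                if g_dev['mnt'].mount.Slewing: st += 'm>'\n                #if g_dev['enc'].status['dome_slewing']: st += 'd>'\n                print(st)\n                st = \"\"\n                time.sleep(0.2)\n                g_dev['obs'].update_status()\n        except:\n            print(\"Motion check faulted.\")\n\n# ============================================================================= Save AFTER mount has settled down.\n# =============================================================================\n# =============================================================================\n        start_ra = g_dev['mnt'].mount.RightAscension #Read these to go back. 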
NB NB Need to cleanly pass these on so we can return to proper target.\n start_dec = g_dev['mnt'].mount.Declination\n focus_start = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n# =============================================================================\n# =============================================================================\n# =============================================================================\n print(\"Saved *mounting* ra, dec, focus: \", start_ra, start_dec, focus_start)\n\n if req2['target'] == 'near_tycho_star': ## 'bin', 'area' Other parameters\n\n # Go to closest Mag 7.5 Tycho * with no flip\n\n focus_star = tycho.dist_sort_targets(g_dev['mnt'].current_icrs_ra, g_dev['mnt'].current_icrs_dec, \\\n g_dev['mnt'].current_sidereal)\n print(\"Going to near focus star \" + str(focus_star[0][0]) + \" degrees away.\")\n g_dev['mnt'].go_coord(focus_star[0][1][1], focus_star[0][1][0])\n req = {'time': 12.5, 'alias': str(self.config['camera']['camera_1_1']['name']), 'image_type': 'auto_focus'} # NB Should pick up filter and constats from config\n opt = {'area': 150, 'count': 1, 'bin': '2, 2', 'filter': 'focus'}\n else:\n pass #Just take an image where currently pointed.\n req = {'time': 15, 'alias': str(self.config['camera']['camera_1_1']['name']), 'image_type': 'auto_focus'} # NB Should pick up filter and constats from config\n opt = {'area': 150, 'count': 1, 'bin': '2, 2', 'filter': 'focus'}\n foc_pos0 = focus_start\n result = {}\n #print(\"temporary patch in Sim values\")\n print('Autofocus Starting at: ', foc_pos0, '\\n\\n')\n\n\n g_dev['foc'].guarded_move((foc_pos0 - 0* throw)*g_dev['foc'].micron_to_steps) # NB added 20220209 Nasty bug, varies with prior state\n\n #throw = throw # NB again, from config. Units are microns Passed as default paramter\n retry = 0\n while retry < 3:\n if not sim:\n\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False) ## , script = 'auto_focus_script_0') # This is where we start.\n\n else:\n\n result['FWHM'] = 3\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n\n try:\n spot1 = result['FWHM']\n foc_pos1 = result['mean_focus']\n except:\n spot1 = False\n foc_pos1 = False\n print (\"spot1 failed in autofocus script\")\n\n if math.isnan(spot1) or spot1 ==False:\n retry += 1\n print(\"Retry of central focus star)\")\n continue\n else:\n break\n print('Autofocus Moving In.\\n\\n')\n\n g_dev['foc'].guarded_move((foc_pos0 - 1*throw)*g_dev['foc'].micron_to_steps)\n #opt['fwhm_sim'] = 4.\n if not sim:\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False) ## , script = 'auto_focus_script_1') # This is moving in one throw.\n else:\n result['FWHM'] = 4\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n try:\n spot2 = result['FWHM']\n foc_pos2 = result['mean_focus']\n except:\n spot2 = False\n foc_pos2 = False\n print (\"spot2 failed on autofocus moving in\")\n\n print('Autofocus Overtaveling Out.\\n\\n')\n g_dev['foc'].guarded_move((foc_pos0 + 2*throw)*g_dev['foc'].micron_to_steps)\n #time.sleep(10)#It is important to overshoot to overcome any backlash WE need to be sure Exposure waits.\n print('Autofocus Moving back in half-way.\\n\\n')\n\n g_dev['foc'].guarded_move((foc_pos0 + throw)*g_dev['foc'].micron_to_steps) #NB NB NB THIS IS WRONG!\n\n #time.sleep(10)#opt['fwhm_sim'] = 5\n if not sim:\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False) ## , script = 'auto_focus_script_2') # This is 
moving out one throw.\n else:\n result['FWHM'] = 4.5\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n try:\n spot3 = result['FWHM']\n foc_pos3 = result['mean_focus']\n except:\n spot3 = False\n foc_pos3 = False\n print (\"spot3 failed on autofocus moving in\")\n x = [foc_pos2, foc_pos1, foc_pos3]\n y = [spot2, spot1, spot3]\n print('X, Y: ', x, y, 'Desire center to be smallest.')\n if spot1 is None or spot2 is None or spot3 is None or spot1 == False or spot2 == False or spot3 == False: #New additon to stop crash when no spots\n print(\"No stars detected. Returning to original focus setting and pointing.\")\n\n g_dev['foc'].guarded_move((focus_start)*g_dev['foc'].micron_to_steps)\n self.sequencer_hold = False #Allow comand checks.\n self.af_guard = False\n g_dev['mnt'].mount.SlewToCoordinatesAsync(start_ra, start_dec)\n self.sequencer_hold = False\n self.guard = False\n self.af_guard = False\n return\n if spot1 < spot2 and spot1 < spot3:\n try:\n #Digits are to help out pdb commands!\n a1, b1, c1, d1 = fit_quadratic(x, y)\n new_spot = round(a1*d1*d1 + b1*d1 + c1, 2)\n\n except:\n\n print('Autofocus quadratic equation not converge. Moving back to starting focus: ', focus_start)\n\n g_dev['foc'].guarded_move((focus_start)*g_dev['foc'].micron_to_steps)\n time.sleep(5)\n self.sequencer_hold = False #Allow comand checks.\n self.af_guard = False\n g_dev['mnt'].mount.SlewToCoordinatesAsync(start_ra, start_dec) #NB NB Does this really take us back to starting point?\n self.sequencer_hold = False\n self.guard = False\n self.af_guard = False\n return\n if min(x) <= d1 <= max(x):\n print ('Moving to Solved focus: ', round(d1, 2), ' calculated: ', new_spot)\n pos = int(d1*g_dev['foc'].micron_to_steps)\n\n\n\n g_dev['foc'].guarded_move(pos)\n time.sleep(5)\n g_dev['foc'].last_known_focus = d1\n try:\n g_dev['foc'].last_temperature = g_dev['foc'].focuser.Temperature\n except:\n g_dev['foc'].last_temperature = 7.5 #NB NB NB this should be a config file default.\n g_dev['foc'].last_source = \"auto_focus_script\"\n\n if not sim:\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False) # script = 'auto_focus_script_3') # This is verifying the new focus.\n else:\n result['FWHM'] = new_spot\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n try:\n spot4 = result['FWHM']\n foc_pos4 = result['mean_focus']\n except:\n spot4 = False\n foc_pos4 = False\n print (\"spot4 failed \")\n print('\\nFound best focus at: ', foc_pos4,' measured is: ', round(spot4, 2), '\\n')\n g_dev['foc'].af_log(foc_pos4, spot4, new_spot)\n print(\"Returning to: \", start_ra, start_dec)\n g_dev['mnt'].mount.SlewToCoordinatesAsync(start_ra, start_dec) #Return to pre-focus pointing.\n if sim:\n\n g_dev['foc'].guarded_move((focus_start)*g_dev['foc'].micron_to_steps)\n # NB here we could re-solve with the overlay spot just to verify solution is sane.\n\n # NB NB We may want to consider sending the result image patch to AWS\n # NB NB NB I think we may have spot numbers wrong by 1 count and coarse focs not set up correctly.\n self.sequencer_hold = False\n self.guard = False\n self.af_guard = False\n return\n elif spot2 <= spot1 < spot3: #Add to the inside\n pass\n print('Autofocus Moving In 2nd time.\\n\\n')\n g_dev['foc'].guarded_move((foc_pos0 - 2.5*throw)*g_dev['foc'].micron_to_steps)\n if not sim:\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False) ## , script = 'auto_focus_script_1') # This is moving in one throw.\n else:\n 
result['FWHM'] = 6\n                result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n            try:\n                spot4 = result['FWHM']\n                foc_pos4 = result['mean_focus']\n            except:\n                spot4 = False\n                foc_pos4 = False\n                print (\"spot4 failed on autofocus moving in 2nd time.\")\n            x = [foc_pos4, foc_pos2, foc_pos1, foc_pos3]\n            y = [spot4, spot2, spot1, spot3]\n            print('X, Y: ', x, y, 'Desire center to be smallest.')\n            try:\n                #Digits are to help out pdb commands!\n                a1, b1, c1, d1 = fit_quadratic(x, y)\n                new_spot = round(a1*d1*d1 + b1*d1 + c1, 2)\n\n            except:\n\n                print('Autofocus quadratic fit did not converge. Moving back to starting focus: ', focus_start)\n\n                g_dev['foc'].guarded_move((focus_start)*g_dev['foc'].micron_to_steps)\n                time.sleep(5)\n                self.sequencer_hold = False #Allow command checks.\n                self.af_guard = False\n                g_dev['mnt'].mount.SlewToCoordinatesAsync(start_ra, start_dec) #NB NB Does this really take us back to starting point?\n                self.sequencer_hold = False\n                self.guard = False\n                self.af_guard = False\n                return\n            if min(x) <= d1 <= max(x):\n                print ('Moving to Solved focus: ', round(d1, 2), ' calculated: ', new_spot)\n                pos = int(d1*g_dev['foc'].micron_to_steps)\n\n                g_dev['foc'].guarded_move(pos)\n                time.sleep(5)\n                g_dev['foc'].last_known_focus = d1\n                try:\n                    g_dev['foc'].last_temperature = g_dev['foc'].focuser.Temperature\n                except:\n                    g_dev['foc'].last_temperature = 7.5 #NB NB NB this should be a config file default.\n                g_dev['foc'].last_source = \"auto_focus_script\"\n\n                if not sim:\n                    result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False) # script = 'auto_focus_script_3') # This is verifying the new focus.\n                else:\n                    result['FWHM'] = new_spot\n                    result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n                try:\n                    spot4 = result['FWHM']\n                    foc_pos4 = result['mean_focus']\n                except:\n                    spot4 = False\n                    foc_pos4 = False\n                    print (\"spot4 failed \")\n                print('\nFound best focus at: ', foc_pos4, ' measured is: ', round(spot4, 2), '\n')\n                g_dev['foc'].af_log(foc_pos4, spot4, new_spot)\n                print(\"Returning to: \", start_ra, start_dec)\n                g_dev['mnt'].mount.SlewToCoordinatesAsync(start_ra, start_dec) #Return to pre-focus pointing.\n            if sim:\n\n                g_dev['foc'].guarded_move((focus_start)*g_dev['foc'].micron_to_steps)\n            # NB here we could re-solve with the overlay spot just to verify solution is sane.\n\n            # NB NB We may want to consider sending the result image patch to AWS\n            # NB NB NB I think we may have spot numbers wrong by 1 count and coarse focus not set up correctly.\n            self.sequencer_hold = False\n            self.guard = False\n            self.af_guard = False\n            return\n\n        elif spot2 > spot1 >= spot3: #Add to the outside\n            pass\n            print('Autofocus Moving Out 2nd time.\n\n')\n\n            g_dev['foc'].guarded_move((foc_pos0 + 2.5*throw)*g_dev['foc'].micron_to_steps) #NB NB NB THIS IS WRONG!\n            if not sim:\n                result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False) ## , script = 'auto_focus_script_2') # This is moving out one throw.\n            else:\n                result['FWHM'] = 5.5\n                result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n            try:\n                spot4 = result['FWHM']\n                foc_pos4 = result['mean_focus']\n            except:\n                spot4 = False\n                foc_pos4 = False\n                print (\"spot4 failed on autofocus moving out 2nd time.\")\n            x = [foc_pos2, foc_pos1, foc_pos3, foc_pos4]\n            y = [spot2, spot1, spot3, spot4]\n            print('X, Y: ', x, y, 'Desire center to be smallest.')\n            try:\n                #Digits are to help out pdb commands!\n                a1, b1, c1, d1 = fit_quadratic(x, y)\n                new_spot = round(a1*d1*d1 + b1*d1 + 
c1, 2)\n\n except:\n\n print('Autofocus quadratic equation not converge. Moving back to starting focus: ', focus_start)\n\n g_dev['foc'].guarded_move((focus_start)*g_dev['foc'].micron_to_steps)\n time.sleep(5)\n self.sequencer_hold = False #Allow comand checks.\n self.af_guard = False\n g_dev['mnt'].mount.SlewToCoordinatesAsync(start_ra, start_dec) #NB NB Does this really take us back to starting point?\n self.sequencer_hold = False\n self.guard = False\n self.af_guard = False\n return\n if min(x) <= d1 <= max(x):\n print ('Moving to Solved focus: ', round(d1, 2), ' calculated: ', new_spot)\n pos = int(d1*g_dev['foc'].micron_to_steps)\n\n\n\n g_dev['foc'].guarded_move(pos)\n time.sleep(5)\n g_dev['foc'].last_known_focus = d1\n try:\n g_dev['foc'].last_temperature = g_dev['foc'].focuser.Temperature\n except:\n g_dev['foc'].last_temperature = 7.5 #NB NB NB this should be a config file default.\n g_dev['foc'].last_source = \"auto_focus_script\"\n\n if not sim:\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False) # script = 'auto_focus_script_3') # This is verifying the new focus.\n else:\n result['FWHM'] = new_spot\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n try:\n spot4 = result['FWHM']\n foc_pos4 = result['mean_focus']\n except:\n spot4 = False\n foc_pos4 = False\n print (\"spot4 failed \")\n print('\\nFound best focus at: ', foc_pos4,' measured is: ', round(spot4, 2), '\\n')\n g_dev['foc'].af_log(foc_pos4, spot4, new_spot)\n print(\"Returning to: \", start_ra, start_dec)\n g_dev['mnt'].mount.SlewToCoordinatesAsync(start_ra, start_dec) #Return to pre-focus pointing.\n if sim:\n\n g_dev['foc'].guarded_move((focus_start)*g_dev['foc'].micron_to_steps)\n # NB here we could re-solve with the overlay spot just to verify solution is sane.\n\n # NB NB We may want to consider sending the result image patch to AWS\n # NB NB NB I think we may have spot numbers wrong by 1 count and coarse focs not set up correctly.\n self.sequencer_hold = False\n self.guard = False\n self.af_guard = False\n return\n elif spot2 <= spot1 or spot3 <= spot1:\n if spot2 <= spot3:\n min_focus = foc_pos2\n elif spot3 <= spot2:\n min_focus = foc_pos3\n else:\n min_focus = foc_pos0\n\n ## HERE we could add a fourth or fifth try. 
The parabola cannot really invert, nor should we ever be at a wild point after the first focus is\n ## set up.\n print(\"It appears camera is too far out; try again with coarse_focus_script.\")\n self.coarse_focus_script(req2, opt2, throw=throw + 75, begin_at=min_focus)\n self.sequencer_hold = False\n self.guard = False\n self.af_guard = False\n return\n else:\n print('Spots are really wrong so moving back to starting focus: ', focus_start)\n g_dev['foc'].focuser.Move((focus_start)*g_dev['foc'].micron_to_steps)\n print(\"Returning to: \", start_ra, start_dec)\n g_dev['mnt'].mount.SlewToCoordinatesAsync(start_ra, start_dec) #Return to pre-focus pointing.\n if sim:\n\n g_dev['foc'].guarded_move((focus_start)*g_dev['foc'].micron_to_steps)\n # NB here we could re-solve with the overlay spot just to verify solution is sane.\n self.sequencer_hold = False #Allow comand checks.\n self.af_guard = False\n # NB NB We may want to consider sending the result image patch to AWS\n self.sequencer_hold = False\n self.guard = False\n self.af_guard = False\n return\n\n\n def coarse_focus_script(self, req, opt, throw=700, begin_at=None):\n '''\n V curve is a big move focus designed to fit two lines adjacent to the more normal focus curve.\n It finds the approximate focus, particulary for a new instrument. It requires 8 points plus\n a verify.\n Auto focus consists of three points plus a verify.\n Fine focus consists of five points plus a verify.\n Optionally individual images can be multiples of one to average out seeing.\n NBNBNB This code needs to go to known stars to be moe relaible and permit subframes\n '''\n print('AF entered with: ', req, opt)\n self.sequencer_hold = False\n self.guard = False\n self.af_guard = True\n sim = False #g_dev['enc'].status['shutter_status'] in ['Closed', 'closed', 'Closing', 'closing']\n print('AF entered with: ', req, opt, '\\n .. and sim = ', sim)\n #self.sequencer_hold = True #Blocks command checks.\n start_ra = g_dev['mnt'].mount.RightAscension\n start_dec = g_dev['mnt'].mount.Declination\n if begin_at is None: # ADDED 20120821 WER\n foc_start = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n else:\n foc_start = begin_at #In this case we start at a place close to a 3 point minimum.\n g_dev['foc'].guarded_move((foc_start)*g_dev['foc'].micron_to_steps)\n print(\"Saved ra, dec, focus: \", start_ra, start_dec, foc_start)\n try:\n #Check here for filter, guider, still moving THIS IS A CLASSIC\n #case where a timeout is a smart idea.\n #Wait for external motion to cease before exposing. 
Note this precludes satellite tracking.\n st = \"\"\n while g_dev['foc'].focuser.IsMoving or g_dev['rot'].rotator.IsMoving or \\\n g_dev['mnt'].mount.Slewing: #or g_dev['enc'].status['dome_slewing']: #Filter is moving??\n if g_dev['foc'].focuser.IsMoving: st += 'f>'\n if g_dev['rot'].rotator.IsMoving: st += 'r>'\n if g_dev['mnt'].mount.Slewing: st += 'm>'\n #if g_dev['enc'].status['dome_slewing']: st += 'd>'\n print(st)\n st = \"\"\n time.sleep(0.2)\n g_dev['obs'].update_status()\n except:\n print(\"Motion check faulted.\")\n if req['target'] == 'near_tycho_star': ## 'bin', 'area' Other parameters\n # Go to closest Mag 7.5 Tycho * with no flip\n focus_star = tycho.dist_sort_targets(g_dev['mnt'].current_icrs_ra, g_dev['mnt'].current_icrs_dec, \\\n g_dev['mnt'].current_sidereal)\n print(\"Going to near focus star \" + str(focus_star[0][0]) + \" degrees away.\")\n g_dev['mnt'].go_coord(focus_star[0][1][1], focus_star[0][1][0])\n req = {'time': 12.5, 'alias': str(self.config['camera']['camera_1_1']['name']), 'image_type': 'auto_focus'} # NB Should pick up filter and constats from config\n opt = {'area': 100, 'count': 1, 'filter': 'focus'}\n else:\n pass #Just take time image where currently pointed.\n req = {'time': 15, 'alias': str(self.config['camera']['camera_1_1']['name']), 'image_type': 'auto_focus'} # NB Should pick up filter and constats from config\n opt = {'area': 100, 'count': 1, 'filter': 'focus'}\n foc_pos0 = foc_start\n result = {}\n print('Autofocus Starting at: ', foc_pos0, '\\n\\n')\n\n g_dev['foc'].guarded_move((foc_pos0 - 0*throw)*g_dev['foc'].micron_to_steps) #Added 20220209! A bit late\n #throw = 100 # NB again, from config. Units are microns\n if not sim:\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False)\n else:\n result['FWHM'] = 4\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n try:\n spot1 = result['FWHM']\n foc_pos1 = result['mean_focus']\n except:\n spot1 = False\n foc_pos1 = False\n print (\"spot1 failed on coarse focus script\")\n # if not sim:\n # result = g_dev['cam'].expose_command(req, opt, no_AWS=True) ## , script = 'auto_focus_script_0') # This is where we start.\n # else:\n # result['FWHM'] = 3\n # result['mean_focus'] = foc_pos0\n # spot1 = result['FWHM']\n # foc_pos1 = result['mean_focus']\n\n\n print('Autofocus Moving In -1x, second time.\\n\\n')\n\n g_dev['foc'].guarded_move((foc_pos0 - 1*throw)*g_dev['foc'].micron_to_steps)\n #opt['fwhm_sim'] = 4.\n if not sim:\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False)\n else:\n result['FWHM'] = 5\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n try:\n spot2 = result['FWHM']\n foc_pos2 = result['mean_focus']\n except:\n spot2 = False\n foc_pos2 = False\n print (\"spot2 failed on coarse focus script\")\n print('Autofocus Moving In -2x, second time.\\n\\n')\n\n g_dev['foc'].guarded_move((foc_pos0 - 2*throw)*g_dev['foc'].micron_to_steps)\n #opt['fwhm_sim'] = 4.\n if not sim:\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False)\n else:\n result['FWHM'] = 6\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n try:\n spot3 = result['FWHM']\n foc_pos3 = result['mean_focus']\n except:\n spot3 = False\n foc_pos3 = False\n print (\"spot3 failed on coarse focus script\")\n #Need to check we are not going out too far!\n print('Autofocus Moving out +3X.\\n\\n')\n\n g_dev['foc'].guarded_move((foc_pos0 + 
3*throw)*g_dev['foc'].micron_to_steps)\n print('Autofocus back in for backlash to +2X\\n\\n')#It is important to overshoot to overcome any backlash\n g_dev['foc'].guarded_move((foc_pos0 + 2*throw)*g_dev['foc'].micron_to_steps)\n #opt['fwhm_sim'] = 5\n if not sim:\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False)\n else:\n result['FWHM'] = 6.5\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n try:\n spot4 = result['FWHM']\n foc_pos4 = result['mean_focus']\n except:\n spot4 = False\n foc_pos4 = False\n print (\"spot4 failed on coarse focus script\")\n print('Autofocus back in for backlash to +1X\\n\\n')\n\n g_dev['foc'].guarded_move((foc_pos0 + throw)*g_dev['foc'].micron_to_steps)\n #opt['fwhm_sim'] = 4.\n if not sim:\n result = g_dev['cam'].expose_command(req, opt, no_AWS=True, solve_it=False)\n else:\n result['FWHM'] = 5.75\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n try:\n spot5 = result['FWHM']\n foc_pos5 = result['mean_focus']\n except:\n spot5 = False\n foc_pos5 = False\n print (\"spot5 failed on coarse focus script\")\n x = [foc_pos3, foc_pos2, foc_pos1, foc_pos5, foc_pos4] # NB NB 20220218 This assigment is bogus!!!!\n y = [spot3, spot2, spot1, spot5, spot4]\n print('X, Y: ', x, y)\n try:\n #Digits are to help out pdb commands!\n a1, b1, c1, d1 = fit_quadratic(x, y)\n new_spot = round(a1*d1*d1 + b1*d1 + c1, 2)\n except:\n print('Autofocus quadratic equation not converge. Moving back to starting focus: ', foc_start)\n\n g_dev['foc'].guarded_move((foc_start)*g_dev['foc'].micron_to_steps)\n self.sequencer_hold = False\n self.guard = False\n self.af_guard = False\n return\n if min(x) <= d1 <= max(x):\n print ('Moving to Solved focus: ', round(d1, 2), ' calculated: ', new_spot)\n #Saves a base for relative focus adjusts.\n pos = int(d1*g_dev['foc'].micron_to_steps)\n\n g_dev['foc'].guarded_move(pos)\n g_dev['foc'].last_known_focus = d1\n try:\n g_dev['foc'].last_temperature = g_dev['foc'].focuser.Temperature\n except:\n g_dev['foc'].last_temperature = 10.0 #NB NB This should be a site monthly default.\n g_dev['foc'].last_source = \"coarse_focus_script\"\n if not sim:\n\n result = g_dev['cam'].expose_command(req, opt, solve_it=False)\n else:\n result['FWHM'] = new_spot\n result['mean_focus'] = g_dev['foc'].focuser.Position*g_dev['foc'].steps_to_micron\n try:\n spot6 = result['FWHM']\n foc_pos4 = result['mean_focus']\n print('\\n\\n\\nFound best focus at: ', foc_pos4,' measured is: ', round(spot6, 2), '\\n\\n\\n')\n except:\n print('Known bug, Verifcation did not work. Returing to target using solved focus.')\n else:\n print('Coarse_focus did not converge. 
Moving back to starting focus: ', foc_pos0)\n\n            g_dev['foc'].guarded_move((foc_start)*g_dev['foc'].micron_to_steps)\n        print(\"Returning to: \", start_ra, start_dec)\n        g_dev['mnt'].mount.SlewToCoordinatesAsync(start_ra, start_dec) #Return to pre-focus pointing.\n        if sim:\n            g_dev['foc'].guarded_move((foc_start)*g_dev['foc'].micron_to_steps)\n        self.sequencer_hold = False\n        self.guard = False\n        self.af_guard = False\n        return result\n\n\n    def equatorial_pointing_run(self, req, opt, spacing=10, vertical=False, grid=False, alt_minimum=25):\n        '''\n        unpark telescope\n        if not open, open dome\n        go to zenith & expose (Consider using the nearest mag 7 grid star.)\n        verify reasonable transparency\n        Ultimately, check focus, find a good exposure level\n        go to -72.5 degrees of ha, 0 expose\n        ha += 10; repeat to ha = 67.5\n        += 5, expose\n        -= 10 until -67.5\n\n        if vertical go ha = -0.25 and step dec 85 -= 10 to -30 then\n        flip and go other way with offset 5 deg.\n\n        For Grid use Patrick Wallace's Mag 7 Tycho star grid: it covers the\n        sky equal-area, has a bright star as target and wraps around\n        both axes to better sample the encoders. Choose and load the\n        grid coarseness.\n        '''\n        '''\n        Prompt for ACCP model to be turned off\n        if closed:\n            If WxOk: open\n        if parked:\n            unpark\n\n        pick grid star near zenith in west (no flip)\n        expose 10 s\n        solve\n        Is there a bright object in field?\n        adjust exposure if needed.\n        Go to (-72.5 deg HA, dec = 0),\n        Expose, calibrate, save file. Consider\n        whether we can real-time solve or just gather.\n        step 10 degrees forward until ha is 77.5\n        at 77.5 adjust target to (72.5, 0) and step\n        backward. Stop when you get to -77.5.\n        park\n        Launch reduction\n\n        A variant on this is to cover a grid, or a + sign shape.\n        IF sweep\n
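\n        A minimal sketch of the HA-to-RA mapping the sweep below relies on\n        (editor's addition; ra_fix here is assumed to wrap RA into [0, 24)\n        hours, standing in for this package's own helper):\n\n            def ra_fix(ra_hours):\n                return ra_hours % 24.0                  # wrap into [0, 24)\n\n            def ha_deg_to_ra(sidereal_hours, ha_deg):\n                # HA is tabulated in degrees; /15 converts to hours; RA = LST - HA\n                return ra_fix(sidereal_hours - ha_deg/15.0)\n    '''\n        # ptr_utility.ModelOn = False\n\n        self.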
sky_guard = True\n ha_deg_steps = (-72.5, -62.5, -52.5, -42.5, -32.5, -22.5, -12.5, -2.5, \\\n -7.5, -17.5, -27.5, -37.5, -47.5, -57.5, -67.5, \\\n 2.5, 12.5, 22.5, 32.5, 42.5, 52.5, 62.5, 72.5, \\\n 67.5, 57.5, 47.5, 37.5, 27.5, 17.5, 7.5)\n length = len(ha_deg_steps)\n count = 0\n print(\"Starting equatorial sweep.\")\n g_dev['mnt'].unpark_command()\n #cam_name = str(self.config['camera']['camera_1_1']['name'])\n for ha_degree_value in ha_deg_steps:\n target_ra = ra_fix(g_dev['mnt'].mount.SiderealTime - ha_degree_value/15.)\n target_dec = 0\n # # Go to closest Mag 7.5 Tycho * with no flip\n # focus_star = tycho.dist_sort_targets(target_ra, target_dec, \\\n # g_dev['mnt'].mount.SiderealTime)\n # if focus_star is None:\n # print(\"No near star, skipping.\") #This should not happen.\n # continue\n #print(\"Going to near focus star \" + str(focus_star[0]) + \" degrees away.\")\n #req = {'ra': focus_star[1][1],\n # 'dec': focus_star[1][0] #Note order in important (dec, ra)\n req = {'ra': target_ra,\n 'dec': target_dec #Note order in important (dec, ra)\n }\n opt = {}\n g_dev['mnt'].go_command(req, opt)\n st = ''\n enc_status = eval(self.redis_server.get('enc_status')) #NB Is this current?\n while g_dev['mnt'].mount.Slewing or enc_status['dome_slewing']:\n if g_dev['mnt'].mount.Slewing: st += 'm>'\n if g_dev['enc'].status['dome_slewing']: st += 'd>'\n print(st)\n st = ''\n g_dev['obs'].update_status()\n time.sleep(0.5)\n time.sleep(3)\n g_dev['obs'].update_status()\n req = {'time': 10, 'alias': 'sq01', 'image_type': 'experimental'}\n opt = {'area': 150, 'count': 1, 'bin': '2,2', 'filter': g_dev['fil'].filter_data[0][0], 'hint': 'Equator Run'}\n result = g_dev['cam'].expose_command(req, opt)\n g_dev['obs'].update_status()\n result = 'simulated result.'\n count += 1\n print('\\n\\nResult: ', result, 'To go count: ', length - count, '\\n\\n')\n g_dev['mnt'].mount.Tracking = False\n print(\"Equatorial sweep completed. Happy reducing.\")\n ptr_utility.ModelOn = True\n self.sky_guard = False\n return\n\n def cross_pointing_run(self, req, opt, spacing=30, vertical=False, grid=False, alt_minimum=25):\n '''\n unpark telescope\n if not open, open dome\n go to zenith & expose (Consider using Nearest mag 7 grid star.)\n verify reasonable transparency\n Ultimately, check focus, find a good exposure level\n go to -72.5 degrees of ha, 0 expose\n ha += 10; repeat to Ha = 67.5\n += 5, expose\n -= 10 until -67.5\n\n if vertical go ha = -0.25 and step dec 85 -= 10 to -30 then\n flip and go other way with offset 5 deg.\n\n For Grid use Patrick Wallace's Mag 7 Tyco star grid it covers\n sky equal-area, has a bright star as target and wraps around\n both axes to better sample the encoders. Choose and load the\n grid coarseness.\n '''\n '''\n Prompt for ACCP model to be turned off\n if closed:\n If WxOk: open\n if parked:\n unpark\n\n pick grid star near zenith in west (no flip)\n expose 10 s\n solve\n Is there a bright object in field?\n adjust exposure if needed.\n Go to (-72.5deg HA, dec = 0),\n Expose, calibrate, save file. Consider\n if we can real time solve or just gather.\n step 10 degrees forward untl ha is 77.5\n at 77.5 adjust target to (72.5, 0) and step\n backward. Stop when you get to -77.5.\n park\n Launch reduction\n\nA variant on this is cover a grid, cover a + sign shape.\nIF sweep\n '''\n # ptr_utility.ModelOn = False\n\n self. 
sky_guard = True\n points = [(-2.5, 0), (-2.5, -30), (-30, 0), (-60, 0), (2.5, 75), (0.5, 45), \\\n (0.5, 0), (30, 0), (60, 0)]\n ha_deg_steps = (-72.5, -62.5, -52.5, -42.5, -32.5, -22.5, -12.5, -2.5, \\\n -7.5, -17.5, -27.5, -37.5, -47.5, -57.5, -67.5, \\\n 2.5, 12.5, 22.5, 32.5, 42.5, 52.5, 62.5, 72.5, \\\n 67.5, 57.5, 47.5, 37.5, 27.5, 17.5, 7.5)\n length = len(points)\n count = 0\n print(\"Starting cross, # of points: \", length)\n g_dev['mnt'].unpark_command()\n #cam_name = str(self.config['camera']['camera_1_1']['name'])\n for point_value in points:\n target_ra = ra_fix(g_dev['mnt'].mount.SiderealTime - point_value[0]/15.)\n target_dec = point_value[1]\n # # Go to closest Mag 7.5 Tycho * with no flip\n # focus_star = tycho.dist_sort_targets(target_ra, target_dec, \\\n # g_dev['mnt'].mount.SiderealTime)\n # if focus_star is None:\n # print(\"No near star, skipping.\") #This should not happen.\n # continue\n #print(\"Going to near focus star \" + str(focus_star[0]) + \" degrees away.\")\n #req = {'ra': focus_star[1][1],\n # 'dec': focus_star[1][0] #Note order in important (dec, ra)\n req = {'ra': target_ra,\n 'dec': target_dec #Note order in important (dec, ra)\n }\n opt = {}\n g_dev['mnt'].go_command(req, opt)\n st = ''\n while g_dev['mnt'].mount.Slewing or g_dev['enc'].status['dome_slewing']:\n if g_dev['mnt'].mount.Slewing: st += 'm>'\n if g_dev['enc'].status['dome_slewing']: st += 'd>'\n print(st)\n st = ''\n g_dev['obs'].update_status()\n time.sleep(0.5)\n time.sleep(3)\n g_dev['obs'].update_status()\n req = {'time': 30, 'alias': 'sq01', 'image_type': 'experimental'}\n opt = {'area': 150, 'count': 1, 'bin': '2,2', 'filter': g_dev['fil'].filter_data[0][0], 'hint': 'Equator Run'}\n result = g_dev['cam'].expose_command(req, opt)\n g_dev['obs'].update_status()\n result = 'simulated result.'\n count += 1\n print('\\n\\nResult: ', result, 'To go count: ', length - count, '\\n\\n')\n g_dev['mnt'].mount.Tracking = False\n print(\"Equatorial sweep completed. Happy reducing.\")\n ptr_utility.ModelOn = True\n self.sky_guard = False\n return\n\n def sky_grid_pointing_run(self, req, opt, spacing=10, vertical=False, grid=False, alt_minimum=25):\n #camera_name = str(self.config['camera']['camera_1_1']['name'])\n '''\n unpark telescope\n if not open, open dome\n go to zenith & expose (Consider using Nearest mag 7 grid star.)\n verify reasonable transparency\n Ultimately, check focus, find a good exposure level\n go to -72.5 degrees of ha, 0 expose\n ha += 10; repeat to Ha = 67.5\n += 5, expose\n -= 10 until -67.5\n\n if vertical go ha = -0.25 and step dec 85 -= 10 to -30 then\n flip and go other way with offset 5 deg.\n\n For Grid use Patrick Wallace's Mag 7 Tyco star grid it covers\n sky equal-area, has a bright star as target and wraps around\n both axes to better sample the encoders. Choose and load the\n grid coarseness.\n '''\n '''\n Prompt for ACCP model to be turned off\n if closed:\n If WxOk: open\n if parked:\n unpark\n\n pick grid star near zenith in west (no flip)\n expose 10 s\n solve\n Is there a bright object in field?\n adjust exposure if needed.\n Go to (-72.5deg HA, dec = 0),\n Expose, calibrate, save file. Consider\n if we can real time solve or just gather.\n step 10 degrees forward untl ha is 77.5\n at 77.5 adjust target to (72.5, 0) and step\n backward. Stop when you get to -77.5.\n park\n Launch reduction\n\nA variant on this is cover a grid, cover a + sign shape.\nIF sweep\n '''\n self.sky_guard = True\n #ptr_utility.ModelOn = False\n print(\"Starting sky sweep. 
\")\n g_dev['mnt'].unpark_command({}, {})\n if g_dev['enc'].is_dome:\n g_dev['enc'].Slaved = True #Bring the dome into the picture.\n g_dev['obs'].update_status()\n try:\n g_dev['scr'].screen_dark()\n except:\n pass\n g_dev['obs'].update_status()\n g_dev['mnt'].unpark_command()\n #cam_name = str(self.config['camera']['camera_1_1']['name'])\n\n sid = g_dev['mnt'].mount.SiderealTime\n if req['gridType'] == 'medium': # ~50\n grid = 4\n if req['gridType'] == 'coarse': # ~30\n grid = 7\n if req['gridType'] == 'fine': # ~100\n grid = 2\n\n grid_stars = tycho.az_sort_targets(sid, grid) #4 produces about 50 targets.\n length = len(grid_stars)\n print(length, \"Targets chosen for grid.\")\n last_az = 0.25\n count = 0\n for grid_star in grid_stars:\n if grid_star is None:\n print(\"No near star, skipping.\") #This should not happen.\n count += 1\n continue\n if grid_star[0] < last_az: #Consider also insisting on a reasonable HA, eg., >= altitude of the Pole.\n count += 1\n continue\n last_az = grid_star[0] + 0.01\n print(\"Going to near grid star \" + str(grid_star) + \" (az, (dec, ra)\")\n req = {'ra': grid_star[1][1],\n 'dec': grid_star[1][0] #Note order is important (dec, ra)\n }\n opt = {}\n g_dev['mnt'].go_command(req, opt)\n time.sleep(0.5)\n st = ''\n while g_dev['mnt'].mount.Slewing or g_dev['enc'].status['dome_slewing']:\n if g_dev['mnt'].mount.Slewing: st += 'm>'\n if g_dev['enc'].status['dome_slewing']: st += 'd>'\n print(st)\n st = ''\n g_dev['obs'].update_status()\n time.sleep(0.5)\n\n time.sleep(1) #Give a little extra time for mount to settle.\n g_dev['obs'].update_status()\n req = {'time': 30, 'alias': 'sq01', 'image_type': 'experimental'}\n opt = {'area': 150, 'count': 1, 'bin': '2,2', 'filter': g_dev['fil'].filter_data[0][0], 'hint': 'Tycho grid.'}\n result = g_dev['cam'].expose_command(req, opt)\n g_dev['obs'].update_status()\n result = 'simulated result.'\n count += 1\n print('\\n\\nResult: ', result, 'To go count: ', length - count, '\\n\\n')\n\n #g_dev['mnt'].park()\n print(\"Equatorial sweep completed. Happy reducing.\")\n ptr_utility.ModelOn = True\n self.sky_guard = False\n return\n\n def rel_sky_grid_pointing_run(self, req, opt, spacing=10, vertical=False, grid=False, alt_minimum=25):\n #camera_name = str(self.config['camera']['camera_1_1']['name'])\n '''\n unpark telescope\n if not open, open dome\n go to zenith & expose (Consider using Nearest mag 7 grid star.)\n verify reasonable transparency\n Ultimately, check focus, find a good exposure level\n go to -72.5 degrees of ha, 0 expose\n ha += 10; repeat to Ha = 67.5\n += 5, expose\n -= 10 until -67.5\n\n if vertical go ha = -0.25 and step dec 85 -= 10 to -30 then\n flip and go other way with offset 5 deg.\n\n For Grid use Patrick Wallace's Mag 7 Tyco star grid it covers\n sky equal-area, has a bright star as target and wraps around\n both axes to better sample the encoders. Choose and load the\n grid coarseness.\n '''\n '''\n Prompt for ACCP model to be turned off\n if closed:\n If WxOk: open\n if parked:\n unpark\n\n pick grid star near zenith in west (no flip)\n expose 10 s\n solve\n Is there a bright object in field?\n adjust exposure if needed.\n Go to (-72.5deg HA, dec = 0),\n Expose, calibrate, save file. Consider\n if we can real time solve or just gather.\n step 10 degrees forward untl ha is 77.5\n at 77.5 adjust target to (72.5, 0) and step\n backward. 
Stop when you get to -77.5.\n park\n Launch reduction\n\nA variant on this is cover a grid, cover a + sign shape.\nIF sweep\n '''\n #breakpoint()\n self.sky_guard = True\n ptr_utility.ModelOn = False\n print(\"Starting sky sweep.\")\n g_dev['mnt'].unpark_command({}, {})\n if g_dev['enc'].is_dome:\n g_dev['enc'].Slaved = True #Bring the dome into the picture.\n g_dev['obs'].update_status()\n g_dev['scr'].screen_dark()\n g_dev['obs'].update_status()\n g_dev['mnt'].unpark_command()\n #cam_name = str(self.config['camera']['camera_1_1']['name'])\n\n sid = g_dev['mnt'].mount.SiderealTime\n if req['gridType'] == 'medium': # ~50\n grid = 4\n if req['gridType'] == 'coarse': # ~30\n grid = 7\n if req['gridType'] == 'fine': # ~100\n grid = 2\n grid_stars = tycho.tpt_grid\n length = len(grid_stars)\n print(length, \"Targets chosen for grid.\")\n last_az = 0.25\n count = 0\n for grid_star in grid_stars:\n if grid_star is None:\n print(\"No near star, skipping.\") #This should not happen.\n count += 1\n continue\n if grid_star[0] < last_az: #Consider also insisting on a reasonable HA\n count += 1\n continue\n last_az = grid_star[0] + 0.001\n print(\"Going to near grid star \" + str(grid_star) + \" (az, (dec, ra)\")\n req = {'ra': grid_star[1][1],\n 'dec': grid_star[1][0] #Note order is important (dec, ra)\n }\n opt = {}\n g_dev['mnt'].go_command(req, opt)\n time.sleep(0.5)\n st = ''\n while g_dev['mnt'].mount.Slewing or g_dev['enc'].status['dome_slewing']:\n if g_dev['mnt'].mount.Slewing: st += 'm>'\n if g_dev['enc'].status['dome_slewing']: st += 'd>'\n print(st)\n st = ''\n g_dev['obs'].update_status()\n time.sleep(0.5)\n\n time.sleep(3)\n g_dev['obs'].update_status()\n req = {'time': 15, 'alias': 'sq01', 'image_type': 'experimental'}\n opt = {'area': 150, 'count': 1, 'bin': '2,2', 'filter': g_dev['fil'].filter_data[0][0], 'hint': 'Tycho grid.'}\n result = g_dev['cam'].expose_command(req, opt)\n g_dev['obs'].update_status()\n result = 'simulated result.'\n count += 1\n print('\\n\\nResult: ', result, 'To go count: ', length - count, '\\n\\n')\n\n g_dev['mnt'].mount.Tracking = False\n print(\"Equatorial sweep completed. Happy reducing.\")\n ptr_utility.ModelOn = True\n self.sky_guard = False\n return\n\n def vertical_pointing_run(self, req, opt, spacing=10, vertical=False, grid=False, alt_minimum=25):\n '''\n unpark telescope\n if not open, open dome\n go to zenith & expose (Consider using Nearest mag 7 grid star.)\n verify reasonable transparency\n Ultimately, check focus, find a good exposure level\n go to -72.5 degrees of ha, 0 expose\n ha += 10; repeat to Ha = 67.5\n += 5, expose\n -= 10 until -67.5\n\n if vertical go ha = -0.25 and step dec 85 -= 10 to -30 then\n flip and go other way with offset 5 deg.\n\n For Grid use Patrick Wallace's Mag 7 Tyco star grid it covers\n sky equal-area, has a bright star as target and wraps around\n both axes to better sample the encoders. Choose and load the\n grid coarseness.\n '''\n '''\n Prompt for ACCP model to be turned off\n if closed:\n If WxOk: open\n if parked:\n unpark\n\n pick grid star near zenith in west (no flip)\n expose 10 s\n solve\n Is there a bright object in field?\n adjust exposure if needed.\n Go to (-72.5deg HA, dec = 0),\n Expose, calibrate, save file. Consider\n if we can real time solve or just gather.\n step 10 degrees forward untl ha is 77.5\n at 77.5 adjust target to (72.5, 0) and step\n backward. 
Stop when you get to -77.5.\n        park\n        Launch reduction\n\n        A variant on this is to cover a grid, or a + sign shape.\n        IF sweep\n        '''\n        self.sky_guard = True\n        #ptr_utility.ModelOn = False\n        # dec_steps = [-30, -25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30, \\n        #              35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85]\n        dec_steps = [-30, -20, -10, 0, 10, 20, 30, 40, 50, 55, 60, 65, 70, 75, 80, 82.5, \\n                     77.5, 72.5, 67.5, 62.5, 57.5, 50, 45, 35, 25, 15, 5, -5, -15, -25]\n        # dec_copy = dec_steps[:-1].copy()\n        # dec_copy.reverse()\n        # dec_steps += dec_copy\n        length = len(dec_steps)*2\n        count = 0\n        print(\"Starting West dec sweep, ha = 0.1\")\n        g_dev['mnt'].unpark_command()\n        #cam_name = str(self.config['camera']['camera_1_1']['name'])\n        for ha in [0.1, -0.1]:\n            for degree_value in dec_steps:\n                target_ra = ra_fix(g_dev['mnt'].mount.SiderealTime - ha)\n\n\n                # # Go to closest Mag 7.5 Tycho * with no flip\n                # focus_star = tycho.dist_sort_targets(target_ra, target_dec, \\n                #                                      g_dev['mnt'].mount.SiderealTime)\n                # if focus_star is None:\n                #     print(\"No near star, skipping.\") #This should not happen.\n                #     continue\n                # print(\"Going to near focus star \" + str(focus_star[0]) + \" degrees away.\")\n                req = {'ra': target_ra,\n                       'dec': degree_value}\n                opt = {}\n                #Should have an Alt limit check here\n                g_dev['mnt'].go_command(req, opt)\n                st = ''\n                while g_dev['mnt'].mount.Slewing or g_dev['enc'].status['dome_slewing']:\n                    if g_dev['mnt'].mount.Slewing: st += 'm>'\n                    if g_dev['enc'].status['dome_slewing']: st += 'd>'\n                    print(st)\n                    st = ''\n                    g_dev['obs'].update_status()\n                    time.sleep(0.5)\n                time.sleep(3)\n                g_dev['obs'].update_status()\n                req = {'time': 15, 'alias': 'sq01', 'image_type': 'experimental'}\n                opt = {'area': 150, 'count': 1, 'bin': '2,2', 'filter': g_dev['fil'].filter_data[0][0], 'hint': 'Tycho grid.'}\n                result = g_dev['cam'].expose_command(req, opt)\n                g_dev['obs'].update_status()\n                result = 'simulated result.'\n                count += 1\n                print('\n\nResult: ', result, 'To go count: ', length - count, '\n\n')\n        g_dev['obs'].update_status()\n        result = 'simulated'\n        print('Result: ', result)\n        g_dev['mnt'].stop_command()\n        print(\"Vertical sweep completed. Happy reducing.\")\n        self.equatorial_pointing_run({},{})\n        ptr_utility.ModelOn = True\n        self.sky_guard = False\n        return\n\n    def append_completes(self, block_id):\n        camera = self.config['camera']['camera_1_1']['name']\n        seq_shelf = shelve.open(g_dev['cam'].site_path + 'ptr_night_shelf/' + camera)\n        print(\"block_id: \", block_id)\n        lcl_list = seq_shelf['completed_blocks']\n        lcl_list.append(block_id) #NB NB an in-line append did not work!\n        seq_shelf['completed_blocks'] = lcl_list\n        print('Appended completes contains: ', seq_shelf['completed_blocks'])\n        seq_shelf.close()\n        return\n\n    def is_in_completes(self, check_block_id):\n        camera = self.config['camera']['camera_1_1']['name']\n        seq_shelf = shelve.open(g_dev['cam'].site_path + 'ptr_night_shelf/' + camera)\n        #print('Completes contains: ', seq_shelf['completed_blocks'])\n        if check_block_id in seq_shelf['completed_blocks']:\n            seq_shelf.close()\n            return True\n        else:\n            seq_shelf.close()\n            return False\n\n\n    def reset_completes(self):\n        try:\n            camera = self.config['camera']['camera_1_1']['name']\n            seq_shelf = shelve.open(g_dev['cam'].site_path + 'ptr_night_shelf/' + str(camera))\n            seq_shelf['completed_blocks'] = []\n            seq_shelf.close()\n        except:\n            print('Found an empty shelf. 
Reset_(block)completes for: ', camera)\n return\n\n # import math\n # chip_x =1.4022\n # chip_y = 0.9362\n # def tile_field(field_x, field_y, chip_x, chip_y, overlap=12.5):\n # trial_x = field_x/(chip_x* (100 - abs(overlap))/100)\n # trial_y = field_y/(chip_y* (100 - abs(overlap))/100)\n # proposed_x = round(trial_x + 0.25, 0)\n # proposed_y = round(trial_y + 0.25, 0)\n # span_x = chip_x*proposed_x\n # span_y = chip_y*proposed_y\n # over_span_x = span_x - field_x\n # over_span_y = span_y - field_y\n # span_y = chip_y*proposed_y\n # if proposed_x - 1 >= 1:\n # x_overlap = over_span_x/(proposed_x - 1)\n # else:\n # x_overlap =(field_x - span_x)/2\n # if proposed_y - 1 >= 1:\n # y_overlap = over_span_y/(proposed_y - 1)\n # else:\n # y_overlap =(field_y - span_y)/2\n # if 0 <= x_overlap < overlap/100:\n # proposed_x += 1\n # span_x = chip_x*proposed_x\n # over_span_x = span_x - field_x\n # x_overlap = over_span_x/(proposed_x - 1)\n # if 0 <= y_overlap < overlap/100:\n # proposed_y += 1\n # span_y = chip_y*proposed_y\n # over_span_y = span_y - field_y\n # y_overlap = over_span_y/(proposed_y - 1)\n # return(proposed_x, proposed_y, x_overlap, y_overlap)\n # for side in range(0,7):\n # area = math.sqrt(2)**side\n # print(side, round(area, 3))\n # print(tile_field(side, side, chip_x, chip_y))","sub_path":"devices/sequencer.py","file_name":"sequencer.py","file_ext":"py","file_size_in_byte":130871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"85184841","text":"import json\nimport argparse\nimport helpers\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.optim import lr_scheduler\nfrom torchvision import datasets, transforms\n\nparser = argparse.ArgumentParser()\nparser.add_argument('data_dir', help='Dataset directory')\nparser.add_argument('--save_dir', default='.', help='Checkpoint save directory')\nparser.add_argument('--arch', choices=['resnet18', 'alexnet', 'vgg16', 'squeezenet', 'densenet'], default='vgg16', help='Architecture')\nparser.add_argument('--learning_rate', type=float, default=0.001, help='Training learning rate')\nparser.add_argument('--hidden_units', type=int, nargs='+', default=[2048, 512], help='Hidden layers')\nparser.add_argument('--epochs', type=int, default=25, help='Number of epochs')\nparser.add_argument('--gpu', action=\"store_true\", help='Use GPU')\nargs = parser.parse_args()\n\ndevice = torch.device(\"cpu\")\nif args.gpu:\n if torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\n else:\n print('GPU is not available')\n exit()\nwith open('cat_to_name.json', 'r') as f:\n cat_to_name = json.load(f)\n\nimage_size = 224\ninput_size = int((image_size * image_size) / 2)\noutput_size = len(cat_to_name)\n\ndata_dir = args.data_dir\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'\n\ntrain_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\ntest_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\ntrain_data = datasets.ImageFolder(train_dir, transform=train_transforms)\ntest_data = datasets.ImageFolder(test_dir, transform=test_transforms)\n\ntrainloader = torch.utils.data.DataLoader(train_data, batch_size=64, 
shuffle=True)\ntestloader = torch.utils.data.DataLoader(test_data, batch_size=32)\n\n\n# Load model\nmodel = helpers.buildModel(args.arch, input_size, output_size, args.hidden_units, train_data.class_to_idx)\nmodel.to(device)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.classifier.parameters(), lr=args.learning_rate, momentum=0.9)\nscheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)\n\nmodel = helpers.trainModel(device, args.epochs, model, train_data, trainloader, testloader, optimizer, criterion, scheduler)\ntorch.save({'arch': args.arch,\n            'input_size': input_size,\n            'output_size': output_size,\n            'hidden_layers': args.hidden_units,\n            'state_dict': model.state_dict(),\n            'optimizer.state_dict': optimizer.state_dict(),\n            'class_to_idx': model.class_to_idx\n            }, args.save_dir + '/checkpoint.pth')\n\nprint('Checkpoint saved')","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"590747269","text":"__author__ = 'nhewitt'\n\nimport sys\nimport os\n#import ParseArgs\nimport subprocess\nimport time\n\nimage_directory = \"images\"\nconfig_filename = 'timelapse.conf'\n\ndef write_default_config():\n    \"\"\"\n    write a default configuration file\n    :param _fname: file name of configuration file to write\n    :return: none\n    \"\"\"\n    '''\n    raspistill Camera App v1.3.8\n\n    Runs camera for specific time, and take JPG capture at end if requested\n\n    usage: raspistill [options]\n\n    Image parameter commands\n\n    -?, --help\t: This help information\n    -w, --width\t: Set image width <size>\n    -h, --height\t: Set image height <size>\n    -q, --quality\t: Set jpeg quality <0 to 100>\n    -r, --raw\t: Add raw bayer data to jpeg metadata\n    -o, --output\t: Output filename <filename> (to write to stdout, use '-o -'). If not specified, no file is saved\n    -l, --latest\t: Link latest complete image to filename <filename>\n    -v, --verbose\t: Output verbose information during run\n    -t, --timeout\t: Time (in ms) before takes picture and shuts down (if not specified, set to 5s)\n    -th, --thumb\t: Set thumbnail parameters (x:y:quality) or none\n    -d, --demo\t: Run a demo mode (cycle through range of camera options, no capture)\n    -e, --encoding\t: Encoding to use for output file (jpg, bmp, gif, png)\n    -x, --exif\t: EXIF tag to apply to captures (format as 'key=value') or none\n    -tl, --timelapse\t: Timelapse mode. Takes a picture every <t>ms\n    -fp, --fullpreview\t: Run the preview using the still capture resolution (may reduce preview fps)\n    -k, --keypress\t: Wait between captures for a ENTER, X then ENTER to exit\n    -s, --signal\t: Wait between captures for a SIGUSR1 from another process\n    -g, --gl\t: Draw preview to texture instead of using video render component\n    -gc, --glcapture\t: Capture the GL frame-buffer instead of the camera image\n    -set, --settings\t: Retrieve camera settings and write to stdout\n    -cs, --camselect\t: Select camera <number>. Default 0\n    -bm, --burst\t: Enable 'burst capture mode'\n    -md, --mode\t: Force sensor mode. 0=auto. 
See docs for other modes available\n -dt, --datetime\t: Replace frame number in file name with DateTime (YearMonthDayHourMinSec)\n -ts, --timestamp\t: Replace frame number in file name with unix timestamp (seconds since 1900)\n\n Preview parameter commands\n\n -p, --preview\t: Preview window settings <'x,y,w,h'>\n -f, --fullscreen\t: Fullscreen preview mode\n -op, --opacity\t: Preview window opacity (0-255)\n -n, --nopreview\t: Do not display a preview window\n\n Image parameter commands\n\n -sh, --sharpness\t: Set image sharpness (-100 to 100)\n -co, --contrast\t: Set image contrast (-100 to 100)\n -br, --brightness\t: Set image brightness (0 to 100)\n -sa, --saturation\t: Set image saturation (-100 to 100)\n -ISO, --ISO\t: Set capture ISO\n -vs, --vstab\t: Turn on video stabilisation\n -ev, --ev\t: Set EV compensation\n -ex, --exposure\t: Set exposure mode (see Notes)\n -awb, --awb\t: Set AWB mode (see Notes)\n -ifx, --imxfx\t: Set image effect (see Notes)\n -cfx, --colfx\t: Set colour effect (U:V)\n -mm, --metering\t: Set metering mode (see Notes)\n -rot, --rotation\t: Set image rotation (0-359)\n -hf, --hflip\t: Set horizontal flip\n -vf, --vflip\t: Set vertical flip\n -roi, --roi\t: Set region of interest (x,y,w,d as normalised coordinates [0.0-1.0])\n -ss, --shutter\t: Set shutter speed in microseconds\n -awbg, --awbgains\t: Set AWB gains - AWB mode must be off\n -drc, --drc\t: Set DRC Level\n -st, --stats\t: Force recomputation of statistics on stills capture pass\n -a, --annotate\t: Enable/Set annotate flags or text\n -3d, --stereo\t: Select stereoscopic mode\n -dec, --decimate\t: Half width/height of stereo image\n -3dswap, --3dswap\t: Swap camera order for stereoscopic\n -ae, --annotateex\t: Set extra annotation parameters (text size, text colour(hex YUV), bg colour(hex YUV))\n\n\n Notes\n\n Exposure mode options :\n auto,night,nightpreview,backlight,spotlight,sports,snow,beach,verylong,fixedfps,antishake,fireworks\n\n AWB mode options :\n off,auto,sun,cloud,shade,tungsten,fluorescent,incandescent,flash,horizon\n\n Image Effect mode options :\n none,negative,solarise,sketch,denoise,emboss,oilpaint,hatch,gpen,pastel,watercolour,film,blur,saturation,colourswap,washedout,posterise,colourpoint,colourbalance,cartoon\n\n Metering Mode options :\n average,spot,backlit,matrix\n\n Dynamic Range Compression (DRC) options :\n off,low,med,high\n\n Preview parameter commands\n\n -gs, --glscene\t: GL scene square,teapot,mirror,yuv,sobel\n -gw, --glwin\t: GL window settings <'x,y,w,h'>\n\n '''\n config_file = open(config_filename, 'w')\n config_file.writelines([ '-w = 1920\\n', '-h = 1080\\n', '-q = 100\\n', '-e = png\\n', '-ex = auto\\n', '-awb = auto\\n' ])\n config_file.close()\n\n\ndef read_configuration():\n \"\"\"\n Read in the timelapse configuration parameters, ideally this would be called on\n each iteration of the timelapse loop to ensure config updates are read\n :param _fname: - string, filename of the configuration to read in\n :return: - list, containing list of parameters to raspistill\n \"\"\"\n # open config file\n config_file = open(config_filename, 'r')\n\n # init config dict\n config_list = []\n\n # loop through config\n for line in config_file:\n if not line == '':\n line = line.strip()\n param = line.split('=')\n for i in range(len(param)):\n param[i] = param[i].strip()\n try:\n config_list.append(param[0])\n config_list.append(param[1])\n except Exception as e:\n pass\n return config_list\n\ndef alloc_name(_count):\n \"\"\"\n allocate a free filename ( don't overwrite 
previous files in case of crash )\n check image directory to find an image filename that's not currently taken\n :param _count: count to start at\n :return: tuple, (filename, count)\n \"\"\"\n filename = 'image'\n fname_dict = {}\n for fname in os.listdir(image_directory):\n if fname not in fname_dict:\n fname_dict[fname] = fname\n while (filename + str(_count) + '.png') in fname_dict:\n _count += 1\n return (filename + str(_count) + '.png', _count)\n\ndef main(args):\n \"\"\"\n main function\n :param args: command line parameters\n :return: none\n \"\"\"\n # TODO:\n # parse arguments from command line\n\n global image_directory\n global config_filename\n\n if len(args) < 4:\n print ('usage:', args[0], ' ')\n return\n\n # set image dir\n image_directory = args[1]\n\n config_filename = args[2]\n\n delay = int(args[3])\n\n # create default config if it doesn't already exist\n if not os.path.isfile(config_filename):\n print ('')\n write_default_config()\n\n # init filecount to 0\n count = 0\n while True:\n # loop forever\n\n # get current configuration\n config_list = read_configuration()\n\n # add raspistill command to configuration list\n config_list.insert(0, 'raspistill')\n\n # alloc free filename for image ( don't overwrite )\n file_tuple = alloc_name(count)\n\n # get filename\n filename = file_tuple[0]\n\n #set image count\n count = file_tuple[1]\n\n # add image filename to config list\n config_list.append('-o')\n config_list.append(image_directory + '/' + filename)\n\n # take picture\n subprocess.call(config_list)\n\n #sleep for specified time\n time.sleep(delay)\n\n # increment count\n count += 1\n\nif __name__ == '__main__':\n main(sys.argv)","sub_path":"timelapse.py","file_name":"timelapse.py","file_ext":"py","file_size_in_byte":7702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"328707489","text":"import json\nimport re\nimport os\nimport codecs\nimport math\ndef write_json(dic, filename, encoding):\n dicjson=json.dumps(dic,indent=2)\n f = codecs.open(filename,\"w\",\"utf-8\")\n f.write(dicjson)\n f.close()\n\ndef separation(fichier):\n mots_uniques=[] # list of unique words\n f = open(\"fr/appr/\"+fichier,'r',encoding=\"utf8\")\n i=0 # line id \n for ligne in f:\n #print(\"ligne \"+str(i)) \n ligne=ligne.strip() # strip trailing \\n \\r and spaces at the end of the line \n if len(ligne)==0 or ligne.isspace(): # if the line is empty \"\" or contains only spaces \n continue \n mots=ligne.split() # list of words in the line\n for mot in mots:\n mot=mot.lower() # convert to lowercase\n if mot not in mots_uniques:\n mot=re.sub(\"\\(\",\"\",mot)\n mot=re.sub(\"\\)\",\"\",mot)\n mot=re.sub(\"\\*\",\"\",mot)\n mot=re.sub(\"\\[\",\"\",mot)\n mot=re.sub(\"\\]\",\"\",mot)\n mot=re.sub(\"\\+\",\"\",mot)\n mot=re.sub(\"\\-\",\"\",mot)\n mots_uniques+=[mot]\n i+=1 \n return mots_uniques\n\n \ndef concordation(nombre): # returns {mot1:{fichier1:[(index1,index1fin),(index2,index2find),..],fichier2:...},mot2:{...},...}\n d={}\n liste_fichier=os.listdir('fr/appr')\n for fichier in liste_fichier:\n liste_mot=separation(fichier) # [mot1,mot2,...]\n f=open(\"fr/appr/\"+fichier,\"r\",encoding=\"utf8\")\n texte_fichier=f.read() # the whole text \n f.close()\n for mot in liste_mot : # for each word in the file\n liste_index_mot=indexation(mot,fichier) #[(debut,fin,mot),...]\n if mot not in d and mot!=\"\":\n d[mot]={fichier:[(texte_fichier[index[0]-nombre:index[0]],texte_fichier[index[1]:index[1]+nombre])for index in 
liste_index_mot]}\n elif mot in d and mot!=\"\":\n d[mot][fichier]=[(texte_fichier[index[0]-nombre:index[0]],texte_fichier[index[1]:index[1]+nombre])for index in liste_index_mot]\n \n return d\n\n\ndef sauvegardeIndex(nombre):\n write_json(concordation(nombre),\"index_termes.JSON\",\"utf8\")\n\n\n \ndef indexation(mot,fichier): # [(debut,fin,mot),...]\n liste_index=[]\n f=open(\"fr/appr/\"+fichier,\"r\",encoding=\"utf8\")\n texte=f.read()\n texte=texte.lower()\n for a in re.finditer(mot,texte):\n liste_index.append((a.start(),a.end(),mot))\n f.close()\n return liste_index\n\n\n\n#EXERCISE 2----------------------------------------------------------------------------------------------------\n\n # concordation returns {mot1:{fichier1:[(index1,index1fin),(index2,index2find),..],fichier2:...},mot2:{...},...}\ndef affichage(dico,mot): \n for fichier in dico[mot]:\n for index in dico[mot][fichier]:\n print(index[0].replace(\"\\n\", \" \")+'\\t'+mot+'\\t'+index[1].replace(\"\\n\", \" \"))\n\n\n#Example of execution\n#EXAMPLE\nprint(\"Création de l'index en cours...\")\nsauvegardeIndex(20) \n# EXAMPLE\nprint(\"\\n Affichage du mot 'plus' \\n\")\naffichage(concordation(20),\"plus\")\n\n\n\n\n","sub_path":"L2/Semestre 3/Traitement automatique des langues/TP6v2 concordancier/bensitel_mekhelef_TP6.py","file_name":"bensitel_mekhelef_TP6.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"58933521","text":"# get the current script path.\nimport os\nhere = os.path.dirname(os.path.realpath(__file__))\nsubdir = \"rating-output\"\nfilename = \"myfile2.txt\"\nfilepath = os.path.join(here, subdir, filename)\n\n# create your subdirectory\n#os.mkdir(os.path.join(here, subdir))\n\n# create an empty file.\ntry:\n f = open(filepath, 'w')\n f.close()\nexcept IOError:\n print(\"Wrong path provided\")","sub_path":"RatingScraper/dirTest.py","file_name":"dirTest.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"205673039","text":"class mockSimulation:\n ## public\n \n def __init__(self):\n self.theta_mean = []\n self.theta_std = []\n self.r = 1#number of rounds\n self.s = 1# length of simulations\n self.N = 1# number of parallel simulations\n self.msm = None\n \n def run_multipleSim(self):\n return True\n def runNxtRound(self):\n return True\n \n \n ## private\n def PreAll(self, trj):\n \"\"\"\n Pre-Sampling:\n choose states with minimum counts or newly discovered states\n \n output:\n trj with shape of [[Xs][Ys]]\n \"\"\"\n import numpy as np\n comb_trj = np.concatenate(trj)\n\n #for theta in range(len(trj)):\n # comb_trj.append(np.concatenate(np.concatenate(trj[theta])))\n trj_Sp = np.array(comb_trj) # pick all\n \n return trj_Sp\n\n\n def PreSamp_MC(self, trj, N = 20):\n \"\"\"\n Pre-Sampling for Monte Carlo simulations:\n choose states with minimum counts or newly discovered states\n \n output:\n trj with shape of \n \"\"\"\n import numpy as np\n cl_trjs = trj \n unique, counts = np.unique(cl_trjs, return_counts=True)\n leastPop = counts.argsort()[:N]\n init_cl = [unique[i] for i in leastPop]\n return init_cl\n\n \n def map(self, trj_Ps):\n \"\"\"\n\n output:\n n_ec x n_frames\n \"\"\"\n # map coordinate space to reaction coordinates space\n import numpy as np\n trj_Ps_theta = []\n msm = self.msm\n for frame in trj_Ps:\n theta = np.load('MSMStatesAllVals/Ave_AllECdist_cluster'+str(int(frame))+'.npy')[0][0] ## changed\n 
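# Illustrative note (an assumption, not from the original repo): each\n# MSMStatesAllVals/Ave_AllECdist_cluster<k>.npy file is taken to hold a\n# (1, 1, n_ec) array of per-cluster reaction-coordinate averages, so [0][0]\n# yields the length-n_ec feature vector used below. A hypothetical sketch of\n# writing one such file:\n# import numpy as np\n# np.save('MSMStatesAllVals/Ave_AllECdist_cluster0.npy',\n# np.random.rand(1, 1, 10)) # shape (1, 1, n_ec), n_ec assumed 10\n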
trj_Ps_theta.append(theta)\n #trj_Sp_theta = trj_Sp\n\n # change the format\n trj_Ps_theta_2 = []\n ##############\n trj_Ps_theta = np.array(trj_Ps_theta) \n for theta_index in range(len(trj_Ps_theta[0])):\n trj_Ps_theta_2.append(trj_Ps_theta[:,theta_index])\n return trj_Ps_theta_2\n\n def reward_state(self, S, theta_mean, theta_std, W_):\n \n r_s = 0\n for k in range(len(W_)):\n r_s = r_s + W_[k]*(abs(S[k] - theta_mean[k])/theta_std[k]) #No direction\n \"\"\"\n if (S[k] - theta_mean[k]) < 0: \n r_s = r_s + W_[k][0]*(abs(S[k] - theta_mean[k])/theta_std[k])\n else:\n r_s = r_s + W_[k][1]*(abs(S[k] - theta_mean[k])/theta_std[k])\n \"\"\"\n return r_s\n\n def reward_state_withoutStd(self, S, theta_mean, theta_std, W_):\n \n r_s = 0\n for k in range(len(W_)):\n \n r_s = r_s + W_[k]*(abs(S[k] - theta_mean[k])) # no direction\n \"\"\"\n if (S[k] - theta_mean[k]) < 0: \n r_s = r_s + W_[k][0]*(abs(S[k] - theta_mean[k]))\n else:\n r_s = r_s + W_[k][1]*(abs(S[k] - theta_mean[k]))\n \"\"\"\n return r_s\n\n\n def updateStat(self, trj_Sp_theta): \n import numpy as np\n theta_mean = []\n theta_std = []\n for theta in range(len(trj_Sp_theta)):\n theta_mean.append(np.mean(trj_Sp_theta[theta]))\n theta_std.append(np.std(trj_Sp_theta[theta]))\n self.theta_std = theta_std\n self.theta_mean = theta_mean\n \n\n def reward_trj(self, trj_Sp_theta, W_):\n \"\"\"\n \n \"\"\"\n import numpy as np\n #theta_mean = []\n #theta_std = []\n #for theta in range(len(W_)):\n # theta_mean.append(np.mean(trj_Sp_theta[theta]))\n # theta_std.append(np.std(trj_Sp_theta[theta]))\n \n\n r = []\n # for over all dicovered states\n trj_Sp_theta = np.array(trj_Sp_theta)\n for state_index in range(len(trj_Sp_theta[0])):\n #print('trj_Sp_theta', trj_Sp_theta)\n state_theta = trj_Sp_theta[:, state_index]\n r_s = self.reward_state(state_theta, self.theta_mean, self.theta_std, W_)\n \n r.append(r_s)\n \n R = np.sum(np.array(r))\n return R\n \n \n \n def updateW(self, trj_Sp_theta, W_0):\n \"\"\"\n update weigths \n prior_weigths = W_0\n \"\"\"\n def fun(x):\n global trj_Sp_theta_z\n\n W_0 = x\n r_0 = self.reward_trj(trj_Sp_theta, W_0)\n return -1*r_0 \n import numpy as np\n from scipy.optimize import minimize\n \n global trj_Sp_theta_z \n trj_Sp_theta_z = trj_Sp_theta\n alpha = 0.2\n delta = alpha\n cons = ({'type': 'eq',\n 'fun' : lambda x: np.array([np.sum(x)-1])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([np.min(x)])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([np.abs(np.sum(x-x0))+delta])})\n\n x0 = W_0\n res = minimize(fun, x0, constraints=cons)\n\n x = res.x\n\n W = x/(np.sum(x)) # changed\n return W\n \n def findStarting(self, trj_Ps_theta, trj_Ps, W_1, starting_n=10 , method = 'RL'):\n \"\"\"\n trj_Ps_theta: \n size n_theta x n_frames\n trj_Ps:\n \"\"\"\n # get new starting points (in theta domain) using new reward function based on updated weigths (W_1)\n import numpy as np \n theta_mean = []\n theta_std = []\n print(len(W_1), len(trj_Ps_theta))\n for theta in range(len(W_1)):\n theta_mean.append(np.mean(trj_Ps_theta[theta]))\n theta_std.append(np.std(trj_Ps_theta[theta]))\n \n ranks = {}\n trj_Ps_theta = np.array(trj_Ps_theta)\n for state_index in range(len(trj_Ps_theta[0])):\n state_theta = trj_Ps_theta[:,state_index]\n \n r = self.reward_state( state_theta, theta_mean, theta_std, W_1)\n \n ranks[state_index] = r\n\n newPoints_index0 = sorted(ranks.items(), key=lambda x: x[1], reverse=True)[0:starting_n] \n newPoints_index = np.array(newPoints_index0)[:,0] \n \n\n n_coord = 1 \n newPoints = [trj_Ps[int(i)] for i in 
newPoints_index] \n return newPoints\n \n \n def creatPotentioal(self):\n return True\n \n def run(self, inits, nstepmax = 10):\n \"\"\"\n Parameters\n ----------\n initi : \n initial state (singe state)\n msm :\n reference MSM\n s :\n lenght (number of steps) of each simulation\t\n \n output :\n final trajectory\n \"\"\"\n import numpy as np\n msm = self.msm\n N = len(inits)\n trjs = np.empty([N, nstepmax])\n for n in range(N):\n init = np.int(inits[n])\n trj = msm.sample_discrete(state=init, n_steps=nstepmax, random_state=None)\n trjs[n] = trj\n return trjs\n \n\n def isActive_singleRound(self, trjs):\n time = -1\n n_parTrjs = len(trjs)\n for trj in trjs:\n for frame in range(len(trj)):\n if self.isFolded(trj[frame]):\n time = n_parTrjs * frame\n return time\n return time\n\n########################################################################################################################\n# From calculated and saved files reads if the state is an active state or not\n########################################################################################################################\t\n\n def isFolded(self, state):\n import numpy as np \n foldedState = [463]\n isfolded = False\n if state in foldedState:\n isfolded = True\n return isfolded\n\n\n def runSimulation(self, R=3,N=10,s=8, method='RL'):\n import numpy as np\n activeTime = -1\n #init = 132 GPCRs B2AR\n init = 120 # WW domain\n inits = [init for i in range(N)]\n n_ec = 10\n W_0 = [1/n_ec for i in range(n_ec)]\n Ws = []\n trj1 = self.run(inits, nstepmax = s)\n comb_trj1 = np.concatenate(trj1)\n trjs = comb_trj1\n print(len(trjs))\n trj1_Ps = self.PreSamp_MC(trj1, N = 4*N) # pre analysis , 1 x n_frames\n trj1_Ps_theta = self.map(trj1_Ps)\n print(len(trj1_Ps_theta))\n newPoints = self.findStarting(trj1_Ps_theta, trj1_Ps, W_0, starting_n = N , method = 'RL')\n trjs_theta = trj1_Ps_theta\n trjs_Ps_theta = trj1_Ps_theta\n \n count = 1\n for round in range(R):\n self.updateStat(trjs_theta) # based on all trajectories\n W_1 = self.updateW(trjs_Ps_theta, W_0)\n W_0 = W_1\n Ws.append(W_0)\n \n trj1 = self.run(newPoints, nstepmax = s) # N (number of parallel) x n_all_frames\n isActive = self.isActive_singleRound(trj1)\n trj1 = np.concatenate(trj1) # 1 x n_all_frames\n \n if int(isActive)!=-1:\n print('Active')\n activeTime = (round)*N*s+isActive\n break\n \n com_trjs = np.concatenate((trjs, trj1))\n trjs = np.array(com_trjs)\n trjs_theta = np.array(self.map(trjs))\n trjs_Ps = self.PreSamp_MC(trjs, N = 5*N)\n trjs_Ps_theta = np.array(self.map(trjs_Ps))\n newPoints = self.findStarting(trjs_Ps_theta, trjs_Ps, W_1, starting_n = N , method = 'RL')\n count = count + 1\n \n np.save('foldTime_'+'r'+str(int(R))+'N'+str(N)+'s'+str(s), activeTime)\n np.save('w_'+'r'+str(int(R))+'N'+str(N)+'s'+str(s), Ws)\n return activeTime\n \n\n def multiSim_timeCal_script(self, method='RL'):\n\n T_n = range(10,1010,10) # number of trajectories\n T_len = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30] # lenght of trajectories\n N=10\n l = len(T_len)\n n = len(T_n)\n count = 1\n for i in range(l):\n for j in range(n):\n T_len1 = T_len[i]\n T_n1 = T_n[j]\n r=T_n1/N\n N=10\n s=T_len1\n myfile = open('run_'+'r'+str(int(r))+'N'+str(N)+'s'+str(s)+'.py','w')\n myfile.write('import pickle \\n')\n myfile.write('import RLSim as rl \\n')\n myfile.write('import numpy as np \\n')\n myfile.write('msm = pickle.load(open(\\'MSM.pkl\\',\\'rb\\'), encoding=\\'latin1\\') \\n')\n myfile.write('my_sim = rl.mockSimulation() \\n')\n 
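# For reference (reconstructed from the surrounding write() calls, not copied\n# from the repo), a generated script such as run_r1N10s1.py would read:\n# import pickle\n# import RLSim as rl\n# import numpy as np\n# msm = pickle.load(open('MSM.pkl','rb'), encoding='latin1')\n# my_sim = rl.mockSimulation()\n# my_sim.msm = msm\n# my_sim.runSimulation(s=1, R=1, N=10)\n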
myfile.write('my_sim.msm = msm \\n')\n myfile.write('my_sim.runSimulation(s='+str(int(s))+', R='+ str(int(r)) +', N='+ str(N)+') \\n')\n myRun = open('Run_'+str(count),'w')\n myRun.write('python run_'+'r'+str(int(r))+'N'+str(N)+'s'+str(int(s))+'.py')\n myRun.close()\n count = count + 1\n myfile.close()\n return\n\n def collect_times(self):\n import numpy as np\n T_len = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30] # lenght of trajectories\n T_n = range(10,1010,10) # number of trajectories\n N=10\n l = len(T_len)\n n = len(T_n)\n time = np.empty([l, n])\n for i in range(l):\n for j in range(n):\n T_len1 = T_len[i]\n T_n1 = T_n[j]\n r=T_n1/N\n N=10\n s=T_len1\n t = np.load('foldTime_'+'r'+str(int(r))+'N'+str(N)+'s'+str(s)+'.npy')\n print(t)\n time[i][j] = t.item()\n np.savetxt('times.txt', time)\n return time\n","sub_path":"MonteCarlo/Folding-ww/RLSimnoDir.py","file_name":"RLSimnoDir.py","file_ext":"py","file_size_in_byte":15003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"308468231","text":"import sys\nfrom tkinter import *\nimport time \nimport datetime\nimport RPi.GPIO as GPIO\nimport dht11\nimport geocoder\nfrom pprint import pprint\nimport requests \nfrom PIL import Image\nimport busio\nimport adafruit_sgp30\nimport board\nimport serial\nimport syslog\nimport time\nport = \"/dev/ttyACM0\"\ni2c_bus = busio.I2C(board.SCL, board.SDA, frequency = 1000000)\nsgp30 = adafruit_sgp30.Adafruit_SGP30(i2c_bus)\n\n#Line 67, ifelse\ncolor = \"black\"\nsize = 195\ntcolor = \"white\"\n\n#GPIO Settings to get the sensor working\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.cleanup()\ninstance = dht11.DHT11(pin=17)\nGPIO.setup(21, GPIO.IN, pull_up_down = GPIO.PUD_DOWN) #setup switch\ndef getSwitch():\n reverse = False\n if (GPIO.input(21) == GPIO.HIGH): #button on the right is puhsed\n reverse = True\n return reverse\nreverse = getSwitch()\nstartReverse = reverse\n \n#GPIO.setup(20, GPIO.IN, pull_up_down = GPIO.PUD_DOWN) #setup button\n#Get Color and Size of Window\n#color = input(\"Choose a color: \")\n#size = int(input(\"Select a size: \"))\n\n#Get Current Location\ng = geocoder.ip('me')\ncity = g.city\nlatlng = g.latlng\n\n#get Weather\ntry:\n res = requests.get('https://api.darksky.net/forecast/12b7e3f7df4ecaef5997ffaa2fcb46b2/{0},{1}'.format(latlng[0],latlng[1]))\nexcept:\n print(\"connection error at startup\")\n time.sleep(3)\n sys.exit()\ndata = (res.json())\ncurrent_temp = round(data['currently']['temperature'])\nmintemp = round(data['daily']['data'][0][\"temperatureLow\"])\nmaxtemp = round(data['daily']['data'][0][\"temperatureHigh\"])\n#summary = str(data['daily']['summary'])\nmintemp1 = round(data['daily']['data'][1][\"temperatureLow\"])\nmaxtemp1 = round(data['daily']['data'][1][\"temperatureHigh\"])\nmintemp2 = round(data['daily']['data'][2][\"temperatureLow\"])\nmaxtemp2 = round(data['daily']['data'][2][\"temperatureHigh\"])\nmintemp3 = round(data['daily']['data'][3][\"temperatureLow\"])\nmaxtemp3 = round(data['daily']['data'][3][\"temperatureHigh\"])\nmintemp4 = round(data['daily']['data'][4][\"temperatureLow\"])\nmaxtemp4 = round(data['daily']['data'][4][\"temperatureHigh\"])\nmintemp5 = round(data['daily']['data'][5][\"temperatureLow\"])\nmaxtemp5 = round(data['daily']['data'][5][\"temperatureHigh\"])\ndailysummary = str(data['daily']['data'][0][\"summary\"])\nicon0 = str(data['daily']['data'][0][\"icon\"])\nicon1 = str(data['daily']['data'][1][\"icon\"])\nicon2 = 
str(data['daily']['data'][2][\"icon\"])\nicon3 = str(data['daily']['data'][3][\"icon\"])\nicon4 = str(data['daily']['data'][4][\"icon\"])\nicon5 = str(data['daily']['data'][5][\"icon\"])\nsummary = str(maxtemp1)+ \"°/\" + str(mintemp1) + \"° \" + str(maxtemp2)+ \"°/\" + str(mintemp2) + \"° \" + str(maxtemp3)+ \"°/\" + str(mintemp3) + \"° \"+str(maxtemp4)+ \"°/\" + str(mintemp4) + \"° \" + str(maxtemp5)+ \"°/\" + str(mintemp5) + \"° \"\nsunrise = int(data['daily']['data'][0][\"sunriseTime\"])\nsunset = int(data['daily']['data'][0][\"sunsetTime\"])\n\nsunpassStart = False\ncurrentime = int(time.time())\nday = (currentime >= sunrise) and (currentime < sunset)\nnight = (currentime < sunrise) or (currentime >= sunset)\nif (currentime < sunset + 30 and currentime > sunset - 30) or (currentime < sunrise + 30 and currentime > sunrise -30):\n sunpassStart = True\n\n\n\ndef setcolors(sunset, sunrise):\n global reverse\n currentime = int(time.time())\n if reverse and (not sunpassStart): #switch is set to HIGH\n if (((currentime>= sunrise) and (currentime < sunset))):\n color = \"black\"\n tcolor = \"#AFAFAF\"\n else:\n color = \"white\" \n tcolor = \"black\"\n else:\n #reverse = False\n if (((currentime>= sunrise) and (currentime < sunset))):\n color = \"white\" \n tcolor = \"black\"\n else:\n color = \"black\"\n tcolor = \"#AFAFAF\"\n colist = [color, tcolor]\n return colist\n\ncolor = setcolors(sunset, sunrise)[0]\ntcolor = setcolors(sunset, sunrise)[1]\n\n#create labels and Tk box\nfont = \"times\"\nfontype = \"bold\"\nroot = Tk()\nroot.configure(background = color)\nroot.title(\"Clock\")\nright = -5\ndown = -40\nroot.geometry(\"+{}+{}\".format(right,down))\nroot.geometry(\"1700x1700\")\n\nclock = Label(root, font = (font, size, fontype), bg = color, fg = tcolor)\nclock.grid(row = 0, column = 0)\ndate = Label(root, font = (font, int(size/3), fontype), bg = color, fg = tcolor)\ndate.grid(row = 1, column = 0)\noutemp = Label(root, font = (font, int(size/2.2), fontype), bg = color, fg = tcolor)\noutemp.grid(row = 2, column = 0)\ndsum = Label(root, font = (font, int(size/5), fontype), bg = color, fg = tcolor)\ndsum.grid(row = 3, column = 0)\ntemp = Label(root, font = (font, int(size/5), fontype), bg = color, fg = tcolor)\ntemp.grid(row = 4 , column = 0)\nhumid = Label(root, font = (font, int(size/5), fontype), bg = color, fg = tcolor)\nhumid.grid(row = 5, column = 0)\nlocation = Label(root, font = (font, int(size/5), fontype), bg = color, fg = tcolor)\nlocation.grid(row = 6, column = 0)\nweekday = Label(root, font = (font, int(size/5), fontype), bg = color, fg = tcolor)\nweekday.grid(row = 7, column = 0)\nweeksum = Label(root, font = (font, int(size/4), fontype), bg = color, fg = tcolor)\nweeksum.grid(row = 8, column = 0)\niconrow = Label(root, font = (font, int(size/5), fontype), bg = color, fg = tcolor)\niconrow.grid(row = 9, column = 0)\n#photo = Image.open(\"Desktop/icons/cloudy.gif\")\nypos = 910\nxpos = 170\nxspace = 230\nbicons = 4\nsicons = 4\niconfile = \"/home/pi/icons/\"\nphoto0 = PhotoImage(file = str(iconfile +\"{0}.gif\".format(icon0)))\nphoto0 = photo0.subsample(bicons)\nLicon0 = Label(root, image = photo0,bd = 0, highlightthickness = 0, bg = color)\nLicon0.place(x = 540, y = 405)\nphoto1 = PhotoImage(file = str(iconfile +\"{0}.gif\".format(icon1)))\nphoto1 = photo1.subsample(sicons)\nLicon1 = Label(root, image = photo1,bd = 0, highlightthickness = 0, bg = color)\nLicon1.place(x = xpos, y = ypos)\nphoto2 = PhotoImage(file = str(iconfile +\"{0}.gif\".format(icon2)))\nphoto2 = 
photo2.subsample(sicons)\nLicon2 = Label(root, image = photo2,bd = 0, highlightthickness = 0, bg = color)\nLicon2.place(x = xpos + xspace, y = ypos)\nphoto3 = PhotoImage(file = str(iconfile +\"{0}.gif\".format(icon3)))\nphoto3 = photo3.subsample(sicons)\nLicon3 = Label(root, image = photo3,bd = 0, highlightthickness = 0, bg = color)\nLicon3.place(x = xpos + (2*xspace), y = ypos)\nphoto4 = PhotoImage(file = str(iconfile +\"{0}.gif\".format(icon4)))\nphoto4 = photo4.subsample(sicons)\nLicon4 = Label(root, image = photo4,bd = 0, highlightthickness = 0, bg = color)\nLicon4.place(x = xpos + (3*xspace), y = ypos)\nphoto5 = PhotoImage(file = str(iconfile +\"{0}.gif\".format(icon5)))\nphoto5 = photo5.subsample(sicons)\nLicon5 = Label(root, image = photo5,bd = 0, highlightthickness = 0, bg = color)\nLicon5.place(x = xpos + (4*xspace), y = ypos)\n\nygap = 140\nxpos = xpos + 35\nxspace = xspace - 3\nday1ob = Label(root, font = (font, int(size/4), fontype), bg = color, fg = tcolor)\nday1ob.place(x = xpos + (0*xspace), y = ypos - ygap)\nday2ob = Label(root, font = (font, int(size/4), fontype), bg = color, fg = tcolor)\nday2ob.place(x = xpos + (1*xspace), y = ypos - ygap)\nday3ob = Label(root, font = (font, int(size/4), fontype), bg = color, fg = tcolor)\nday3ob.place(x = xpos + (2*xspace), y = ypos - ygap)\nday4ob = Label(root, font = (font, int(size/4), fontype), bg = color, fg = tcolor)\nday4ob.place(x = xpos + (3*xspace), y = ypos - ygap)\nday5ob = Label(root, font = (font, int(size/4), fontype), bg = color, fg = tcolor)\nday5ob.place(x = xpos + (4*xspace), y = ypos - ygap)\n \n \n#get current weekday as Su M T W Th F S Su\ndef getweekday(date):\n year = int(date[0:4])\n month = int(date[5:7])\n day = int(date[8:10])\n dow = int(datetime.date(year,month,day).weekday())\n return dow\n\n#converting 24 hour time to 12 hour AMPM time\ndef to12(time):\n time = str(time)[0:8]\n hour = int(time[0:2])\n AMPM = \"AM\"\n if hour == 12:\n AMPM = \"PM\" #noon\n if hour == 0:\n AMPM = \"AM\" #midnight\n hour = 12\n elif hour > 12:\n hour = hour-12\n AMPM = \"PM\"\n if hour < 10:\n hour = str(\" \" + str(hour))\n time = str(str(hour) + time[2:8] +\" \"+AMPM)\n return time\n\n\n#converting yyyy-mm-dd to Weekday, Month Year\ndef date2weekday(date):\n week = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\n rdth = \"th\"\n year = int(date[0:4])\n month = int(date[5:7])\n monthprint = months[month-1]\n day = int(date[8:10])\n if (day == 1 or day == 21 or day == 31):\n rdth = \"st\"\n if (day == 2 or day == 22):\n rdth = \"nd\"\n if (day == 3 or day == 23):\n rdth = \"rd\"\n dow = int(datetime.date(year,month,day).weekday())\n dow = week[dow]\n newdatetime = str(dow+\" \"+str(monthprint)+\" \"+str(day)+ rdth+\", \"+ str(year))\n # newdatetime = \"Wednesday, September 30th, 2019\"\n return newdatetime\n#connect to ardunio \n#ard = (serial.Serial(port,9600))\nmscount = 0\n#Update Clock\ndef tick():\n global reverse\n global xspace\n global xpos\n global ypos\n global ygap\n global Licon0\n global Licon1\n global Licon2\n global Licon3\n global Licon4\n global Licon5\n global color\n global tcolor\n global clock\n global date\n global outemp\n global dsum\n global temp\n global humid\n global location\n global weekday\n global weeksum\n global iconrow\n global day1ob\n global day2ob\n global day3ob\n global day4ob\n 
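# Note: tick() runs every 200 ms and rebinds many of these module-level names\n# (weather values, PhotoImages), so they must be declared global; keeping the\n# PhotoImage references at module scope also prevents Tkinter from garbage-\n# collecting the images while they are displayed.\n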
global day5ob\n global mscount\n global mintemp\n global maxtemp\n global mintemp1\n global maxtemp1\n global mintemp2\n global maxtemp2\n global mintemp3\n global maxtemp3\n global mintemp4\n global maxtemp4\n global mintemp5\n global maxtemp5\n global current_temp\n global dailysummary\n global summary\n global sunrise\n global sunset\n global icon0\n global icon1\n global icon2\n global icon3\n global icon4\n global icon5\n global photo0\n global photo1\n global photo2\n global photo3\n global photo4\n global photo5\n global iconfile\n mscount += 200\n currentime = int(time.time())\n day = (currentime >= sunrise) and (currentime < sunset)\n night = (currentime < sunrise) or (currentime >= sunset)\n\n reverse = getSwitch()\n if (currentime < sunset + 30 and currentime > sunset - 30) or (currentime < sunrise + 30 and currentime > sunrise -30):\n reverse = False\n if ((((color == \"white\") and (night)) or ((color == \"black\") and (day))) and (not reverse)):\n sys.exit()\n if reverse != startReverse:\n sys.exit()\n time_string = datetime.datetime.now().time()\n # 120000 ms = 2 minutes.\n #Ensure when the counter is reset, the weather isn't updated again\n if mscount % 120000 == 0:\n try:\n res = requests.get('https://api.darksky.net/forecast/12b7e3f7df4ecaef5997ffaa2fcb46b2/{0},{1}'.format(latlng[0],latlng[1]))\n except:\n print(\"connection error\")\n sys.exit()\n data = (res.json())\n mintemp = round(data['daily']['data'][0][\"temperatureLow\"])\n maxtemp = round(data['daily']['data'][0][\"temperatureHigh\"])\n #summary = str(data['daily']['summary'])\n mintemp1 = round(data['daily']['data'][1][\"temperatureLow\"])\n maxtemp1 = round(data['daily']['data'][1][\"temperatureHigh\"])\n mintemp2 = round(data['daily']['data'][2][\"temperatureLow\"])\n maxtemp2 = round(data['daily']['data'][2][\"temperatureHigh\"])\n mintemp3 = round(data['daily']['data'][3][\"temperatureLow\"])\n maxtemp3 = round(data['daily']['data'][3][\"temperatureHigh\"])\n mintemp4 = round(data['daily']['data'][4][\"temperatureLow\"])\n maxtemp4 = round(data['daily']['data'][4][\"temperatureHigh\"])\n mintemp5 = round(data['daily']['data'][5][\"temperatureLow\"])\n maxtemp5 = round(data['daily']['data'][5][\"temperatureHigh\"])\n current_temp = round(data['currently']['temperature'])\n dailysummary = str(data['daily']['data'][0][\"summary\"])\n sunrise = int(data['daily']['data'][0][\"sunriseTime\"])\n sunset = int(data['daily']['data'][0][\"sunsetTime\"])\n icon0 = str(data['daily']['data'][0][\"icon\"])\n photo0 = PhotoImage(file = str(iconfile+\"{0}.gif\".format(icon0)))\n photo0 = photo0.subsample(bicons)\n icon1 = str(data['daily']['data'][1][\"icon\"])\n photo1 = PhotoImage(file = str(iconfile+\"{0}.gif\".format(icon1)))\n photo1 = photo1.subsample(sicons)\n icon2 = str(data['daily']['data'][2][\"icon\"])\n photo2 = PhotoImage(file = str(iconfile+\"{0}.gif\".format(icon2)))\n photo2 = photo2.subsample(sicons)\n icon3 = str(data['daily']['data'][3][\"icon\"])\n photo3 = PhotoImage(file = str(iconfile+\"{0}.gif\".format(icon3)))\n photo3 = photo3.subsample(sicons)\n icon4 = str(data['daily']['data'][4][\"icon\"])\n photo4 = PhotoImage(file = str(iconfile+\"{0}.gif\".format(icon4)))\n photo4 = photo4.subsample(sicons)\n icon5 = str(data['daily']['data'][5][\"icon\"])\n photo5 = PhotoImage(file = str(iconfile+\"{0}.gif\".format(icon5)))\n photo5 = photo5.subsample(sicons)\n summary = str(maxtemp1)+ \"°/\" + str(mintemp1) + \"° \" + str(maxtemp2)+ \"°/\" + str(mintemp2) + \"° \" + str(maxtemp3)+ \"°/\" + str(mintemp3) + 
\"° \"+str(maxtemp4)+ \"°/\" + str(mintemp4) + \"° \" + str(maxtemp5)+ \"°/\" + str(mintemp5) + \"° \"\n mscount = 0\n #Update the weather, otherwise the weather is not updated.\n time_string = to12(time_string)\n date_string = datetime.datetime.now().date()\n week = [\" M\", \" T\", \"W\", \"Th\", \" F\", \" S\", \"Su\"]\n today = int(getweekday(str(date_string)))\n day1 = str(week[(today+1) % 7])\n day2 = str(week[(today+2) % 7])\n day3 = str(week[(today+3) % 7])\n day4 = str(week[(today+4) % 7])\n day5 = str(week[(today+5) % 7])\n date_string = date2weekday(str(date_string))\n outemp_string = str(current_temp) + \"° \" + str(maxtemp)+ \"°/\" + str(mintemp) + \"°\"\n dsum_string = dailysummary\n #Grab sensor data for interior temp and humidity. Only if valid reading from sensor\n #ardln = str(ard.readline())\n #leng = len(ardln)\n #ardln = ardln[2:(leng-5)]\n result = instance.read()\n if result.is_valid():\n AH = ((6.112*(2.71828**((17.67*result.temperature)/(result.temperature + 243.5)))*result.humidity*2.1674)/(273.15+result.temperature)) \n sgp30.set_iaq_humidity(AH)\n #print(AH)\n eCO2, TVOC = sgp30.iaq_measure()\n #ard = \"4.83\"\n #humid_string = (\"Room Humidity: \"+ str(result.humidity)+ \"%\" +\"\\t\\tCO = {0} ppm\".format(ardln))\n humid_string = (\"Room Humidity: \"+ str(result.humidity)+ \"%\" +\"\\t\\tTVOC = {0} ppb\".format(TVOC))\n humid_string = str(humid_string)\n F = (float(result.temperature *(9/5) + 32))\n F = str(F)[0:5]\n if len(F) == 2:\n F = F + \".\"\n while len(F) < 5:\n F = F +\"0\"\n temp_string = str(\"Room Temperature: \" + F +\" F\"+ \"\\tCO2 = %d ppm\" % (eCO2))\n temp.config(text=temp_string)\n humid.config(text=humid_string) \n #display the labels as text\n clock.config(text=time_string)\n date.config(text= date_string)\n outemp.config(text= outemp_string)\n location.config(text = city)\n weeksum.config(text = summary)\n day1ob.config(text = day1)\n day2ob.config(text = day2)\n day3ob.config(text = day3)\n day4ob.config(text = day4)\n day5ob.config(text = day5)\n dsum.config(text = dsum_string)\n Licon0.config(image = photo0)\n Licon1.config(image = photo1)\n Licon2.config(image = photo2)\n Licon3.config(image = photo3)\n Licon4.config(image = photo4)\n Licon5.config(image = photo5)\n #Run this function every 200 msi\n root.after(200, tick)\ntry:\n tick()\n root.mainloop()\nexcept:\n print(\"restarting !\")\n sys.exit()\n","sub_path":"clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":17047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"43124300","text":"from concurrent.futures import ThreadPoolExecutor\nfrom search import Search\nimport time\nimport os\n\n\nif not os.path.exists(\"output\"):\n os.mkdir(\"output\")\n\nchannels = ('web', 'wiki_baike', 'hudong_baike', 'baidu_baike', 'news',\n 'weibo', 'knowledge')\n\nsearch_channels = ThreadPoolExecutor(max_workers=7)\nwc_file = 'wc.txt'\n\n\ndef test_single_channel(channel):\n s = Search(channel)\n start = time.time()\n\n def worker(inst, l):\n entity = l.split()[0].strip()\n inst.check_match(entity, s.search(entity))\n\n if channel == \"weibo\":\n with ThreadPoolExecutor(max_workers=5) as e:\n with open(wc_file, encoding='utf-8') as f:\n f.readline() # first line\n for line in f:\n e.submit(worker, s, line)\n else:\n with open(wc_file, encoding='utf-8') as f:\n f.readline() # first line\n for line in f:\n worker(s, line)\n\n end = time.time()\n s.logger.info(\"%s elapsed time: %ds\" % (channel, end-start))\n 
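# Flush the per-channel outputs; the running match count is written into the\n# match file before it is closed. wc.txt is assumed (from l.split()[0] above)\n# to be whitespace-separated with the entity in the first column and a header\n# line that is skipped, e.g.:\n# entity freq\n# Python 1234\n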
s.not_match.close()\n s.match.write(str(s.match_count))\n s.match.close()\n\n\nfor c in channels:\n search_channels.submit(test_single_channel, c)\n","sub_path":"channel_test/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"187811068","text":"import re\nimport click\nimport subprocess\nimport json\nimport os\nfrom ...utils.token import parse_token\nfrom .commit import commit\nfrom ...utils.env_keys import REPORT_ERROR_KEY\nfrom ...utils.http_client import LaunchableClient\nfrom ...utils.session import clean_session_files\n\n\n@click.command()\n@click.option(\n '--name',\n 'build_name',\n help='build name',\n required=True,\n type=str,\n metavar='BUILD_NAME'\n)\n@click.option(\n '--source',\n help='path to local Git workspace, optionally prefixed by a label. '\n ' like --source path/to/ws or --source main=path/to/ws',\n default=[\".\"],\n metavar=\"REPO_NAME\",\n multiple=True\n)\n@click.pass_context\ndef build(ctx, build_name, source):\n token, org, workspace = parse_token()\n\n clean_session_files(days_ago=14)\n\n # This command accepts REPO_NAME=REPO_DIST and REPO_DIST\n repos = [s.split('=') if re.match(r'[^=]+=[^=]+', s) else (s, s)\n for s in source]\n # TODO: if repo_dist is absolute path, warn the user that that's probably not what they want to do\n\n for (name, repo_dist) in repos:\n ctx.invoke(commit, source=repo_dist)\n\n sources = [(\n name,\n subprocess.check_output(\n \"git rev-parse HEAD\".split(), cwd=repo_dist\n ).decode().replace(\"\\n\", \"\")\n ) for name, repo_dist in repos]\n submodules = []\n for repo_name, repo_dist in repos:\n # invoke git directly because dulwich's submodule feature was broken\n submodule_stdouts = subprocess.check_output(\n \"git submodule status --recursive\".split(), cwd=repo_dist\n ).decode().splitlines()\n for submodule_stdout in submodule_stdouts:\n # the output is e.g.\n # \"+bbf213437a65e82dd6dda4391ecc5d598200a6ce sub1 (heads/master)\"\n matched = re.search(\n r\"^[\\+\\-U ](?P[a-f0-9]{40}) (?P\\w+)\",\n submodule_stdout\n )\n if matched:\n hash = matched.group('hash')\n name = matched.group('name')\n if hash and name:\n submodules.append((repo_name+\"/\"+name, hash))\n\n # Note: currently becomes unique command args and submodules by the hash.\n # But they can be conflict between repositories.\n uniq_submodules = {hash: (name, hash)\n for name, hash in sources + submodules}.values()\n\n try:\n commitHashes = [{\n 'repositoryName': name,\n 'commitHash': hash\n } for name, hash in uniq_submodules]\n\n if not (commitHashes[0]['repositoryName']\n and commitHashes[0]['commitHash']):\n exit('Please specify --source as --source .')\n\n payload = {\n \"buildNumber\": build_name,\n \"commitHashes\": commitHashes\n }\n\n headers = {\n \"Content-Type\": \"application/json\",\n }\n\n path = \"/intake/organizations/{}/workspaces/{}/builds\".format(\n org, workspace)\n\n client = LaunchableClient(token)\n res = client.request(\"post\", path, data=json.dumps(\n payload).encode(), headers=headers)\n res.raise_for_status()\n\n except Exception as e:\n if os.getenv(REPORT_ERROR_KEY):\n raise e\n else:\n print(e)\n","sub_path":"launchable/commands/record/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"545539607","text":"import socket\nimport time\nimport sys\n\nclass WifiConnector(object):\n\n def 
__init__(self):\n self.tcp_ip = '192.168.11.11' # RPI IP address\n #self.tcp_ip = 'localhost'\n self.port = 5111\n self.conn = None\n self.client = None\n self.addr = None\n self.pc_is_connect = False\n\n def close_pc_socket(self):\n\n if self.conn:\n self.conn.close()\n print (\"Closing server socket\")\n if self.client:\n self.client.close()\n print (\"Closing client socket\")\n self.pc_is_connect = False\n\n def is_pc_connection(self):\n return self.pc_is_connect\n\n def send_message_PC(self, message):\n \n try:\n self.client.sendto(message, self.addr)\n print (\"Sent to PC: %s\" % message)\n\n except Exception as e:\n print (\"\\nPC Write Error: %s \" % str(e))\n self.close_pc_socket()\n self.initialise_connection()\n\n def initialise_connection(self):\n # Create a TCP/IP socket\n try:\n self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) \n self.conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n self.conn.bind((self.tcp_ip, self.port))\n self.conn.listen(1) \n print (\"Listening for incoming connections from PC...\")\n self.client, self.addr = self.conn.accept() \n print (\"Connected! Connection address: \", self.addr)\n self.pc_is_connect = True\n\n\n except Exception as e: \n print (\"\\nError: %s\" % str(e))\n\n\n\n def receive_from_PC(self):\n\n try:\n if self.client is None:\n return \n pc_data = self.client.recv(512)\n print (\"Read from PC: %s\" %pc_data)\n return pc_data\n\n except Exception as e:\n print (\"\\nPC Read Error: %s \" % str(e))\n self.close_pc_socket()\n self.initialise_connection()\n\n\n# Test wifi (Host) -- To test client use pc_test_socket.py\n\n# if __name__ == \"__main__\":\n# print (\"main\")\n# pc = WifiConnector()\n# pc.initialise_connection()\n# send_msg = ('Rpi Ready\\n')\n# print (\"send_message_PC(): %s \" % send_msg)\n# pc.send_message_PC(send_msg)\n#\n# while True: \n#\n# print (\"read\")\n# msg = pc.receive_from_PC()\n# print (\"data received: %s \" % msg)\n# pc.send_message_PC(send_msg)\n#\n# print (\"closing sockets\")\n# pc.close_pc_socket()\n\n","sub_path":"Algo,Android,RPI/RPi/Backup 6 Mar/wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"388867593","text":"\"\"\"\nRipped directly out of Django (r8434) before validators were removed forever.\nThese should be ported out of our code and this file removed as soon as \nModelValidation lands or we write the code to do validation better.\n\nA library of validators that return None and raise ValidationError when the\nprovided data isn't valid.\n\nValidators may be callable classes, and they may have an 'always_test'\nattribute. 
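(Usage sketch, inferred from the __call__ signature below rather than stated\nanywhere: a validator is invoked as validator(field_data, all_data) and\nraises ValidationError on failure, e.g. RelaxNGCompact(schema_path)(xml, {}).)\n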
If an 'always_test' attribute exists (regardless of value), the\nvalidator will *always* be run, regardless of whether its associated\nform field is required.\n\"\"\"\n\nimport urllib\nimport re\nimport os\nimport subprocess\nimport tempfile\nimport codecs\nfrom django.conf import settings\nfrom django.utils.functional import Promise, lazy\nfrom django.utils.translation import ugettext as _\nfrom django.utils.encoding import force_unicode\nfrom django.core.exceptions import ValidationError\n\nclass CriticalValidationError(Exception):\n def __init__(self, message):\n \"ValidationError can be passed a string or a list.\"\n if isinstance(message, list):\n self.messages = [force_unicode(msg) for msg in message]\n else:\n assert isinstance(message, (basestring, Promise)), (\"'%s' should be a string\" % message)\n self.messages = [force_unicode(message)]\n\n def __str__(self):\n return str(self.messages)\n\nclass RelaxNGCompact(object):\n \"Validate against a Relax NG compact schema\"\n def __init__(self, schema_path, additional_root_element=None):\n self.schema_path = schema_path\n self.additional_root_element = additional_root_element\n\n def __call__(self, field_data, all_data):\n if self.additional_root_element:\n field_data = '<%(are)s>%(data)s</%(are)s>\\n' % {\n 'are': self.additional_root_element,\n 'data': field_data\n }\n filename = tempfile.mktemp() # Insecure, but nothing else worked\n \n fp = codecs.open(filename, 'w', 'utf-8')\n fp.write(field_data)\n fp.close()\n jing_path = getattr(settings, 'JING_PATH', 'jing')\n output = subprocess.Popen([jing_path, \"-c\", self.schema_path, filename], stdout=subprocess.PIPE).communicate()[0]\n errors = [line.strip() for line in output.splitlines()]\n os.unlink(filename)\n display_errors = []\n lines = field_data.split('\\n')\n for error in errors:\n ignored, line, level, message = error.split(':', 3)\n # Scrape the Jing error messages to reword them more nicely.\n m = re.search(r'Expected \"(.*?)\" to terminate element starting on line (\\d+)', message)\n if m:\n display_errors.append(_('Please close the unclosed %(tag)s tag from line %(line)s. (Line starts with \"%(start)s\".)') % \\\n {'tag':m.group(1).replace('/', ''), 'line':m.group(2), 'start':lines[int(m.group(2)) - 1][:30]})\n continue\n if message.strip() == 'text not allowed here':\n display_errors.append(_('Some text starting on line %(line)s is not allowed in that context. (Line starts with \"%(start)s\".)') % \\\n {'line':line, 'start':lines[int(line) - 1][:30]})\n continue\n m = re.search(r'\\s*attribute \"(.*?)\" not allowed at this point; ignored', message)\n if m:\n display_errors.append(_('\"%(attr)s\" on line %(line)s is an invalid attribute. (Line starts with \"%(start)s\".)') % \\\n {'attr':m.group(1), 'line':line, 'start':lines[int(line) - 1][:30]})\n continue\n m = re.search(r'\\s*unknown element \"(.*?)\"', message)\n if m:\n display_errors.append(_('\"<%(tag)s>\" on line %(line)s is an invalid tag. (Line starts with \"%(start)s\".)') % \\\n {'tag':m.group(1), 'line':line, 'start':lines[int(line) - 1][:30]})\n continue\n if message.strip() == 'required attributes missing':\n display_errors.append(_('A tag on line %(line)s is missing one or more required attributes. (Line starts with \"%(start)s\".)') % \\\n {'line':line, 'start':lines[int(line) - 1][:30]})\n continue\n m = re.search(r'\\s*bad value for attribute \"(.*?)\"', message)\n if m:\n display_errors.append(_('The \"%(attr)s\" attribute on line %(line)s has an invalid value. 
(Line starts with \"%(start)s\".)') % \\\n {'attr':m.group(1), 'line':line, 'start':lines[int(line) - 1][:30]})\n continue\n # Failing all those checks, use the default error message.\n display_error = 'Line %s: %s [%s]' % (line, message, level.strip())\n display_errors.append(display_error)\n if len(display_errors) > 0:\n raise ValidationError(display_errors)\n\n","sub_path":"apps/core/lib/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"186060336","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\nfrom .AttModel import AttModel, Attention\n\n\nclass SamePad2d(nn.Module):\n \"\"\"Mimics tensorflow's 'SAME' padding.\n \"\"\"\n def __init__(self, kernel_size, stride):\n super(SamePad2d, self).__init__()\n self.kernel_size = torch.nn.modules.utils._pair(kernel_size)\n self.stride = torch.nn.modules.utils._pair(stride)\n\n def forward(self, input):\n in_width = input.size()[2]\n in_height = input.size()[3]\n out_width = math.ceil(float(in_width) / float(self.stride[0]))\n out_height = math.ceil(float(in_height) / float(self.stride[1]))\n pad_along_width = ((out_width - 1) * self.stride[0] +\n self.kernel_size[0] - in_width)\n pad_along_height = ((out_height - 1) * self.stride[1] +\n self.kernel_size[1] - in_height)\n pad_left = int(math.floor(pad_along_width / 2))\n pad_top = int(math.floor(pad_along_height / 2))\n pad_right = int(pad_along_width - pad_left)\n pad_bottom = int(pad_along_height - pad_top)\n return F.pad(input, (pad_left, pad_right, pad_top, pad_bottom), 'constant', 0)\n\n def __repr__(self):\n return self.__class__.__name__\n\nclass ContextAttention(nn.Module):\n def __init__(self, opt):\n super(ContextAttention, self).__init__()\n self.opt = opt\n self.predicate_modules = []\n for k in range(self.opt.num_predicates):\n predicate_group = nn.Sequential(\n SamePad2d(kernel_size=self.opt.conv_predicate_kernel, stride=1),\n nn.Conv2d(1, self.opt.conv_predicate_channels, kernel_size=self.opt.conv_predicate_kernel, stride=1,padding=0),\n nn.ReLU(inplace=True),\n SamePad2d(kernel_size=self.opt.conv_predicate_kernel, stride=1),\n nn.Conv2d(self.opt.conv_predicate_channels, self.opt.conv_predicate_channels,kernel_size=self.opt.conv_predicate_kernel, stride=1, padding=0),\n nn.ReLU(inplace=True),\n SamePad2d(kernel_size=self.opt.conv_predicate_kernel, stride=1),\n nn.Conv2d(self.opt.conv_predicate_channels, self.opt.conv_predicate_channels,kernel_size=self.opt.conv_predicate_kernel, stride=1, padding=0),\n nn.ReLU(inplace=True),\n SamePad2d(kernel_size=self.opt.conv_predicate_kernel, stride=1),\n nn.Conv2d(self.opt.conv_predicate_channels, 1, kernel_size=self.opt.conv_predicate_kernel, stride=1,padding=0),\n nn.ReLU(inplace=True)\n ).cuda()\n self.predicate_modules.append(predicate_group)\n\n #self.h2att = nn.Linear(self.opt.rnn_size, self.opt.att_size*self.opt.att_size)\n self.h2att = nn.Linear(self.opt.rnn_size, self.opt.att_size * self.opt.att_size)\n\n self.alpha_net = nn.Linear(self.opt.att_size * self.opt.att_size, 1)\n\n #self.max_pool = nn.MaxPool1d(self.opt.num_predicates, stride=1)\n\n self.init_weights()\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_normal_(p)\n\n def forward(self, att_weight, prev_h, att_feats, p_att_feats, soft_att=None):\n \"\"\"\n 
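Shape annotations below assume batch_size=80, att_size=14 (14*14=196 spatial\nlocations) and rnn_size=1024; these sizes are inferred from the hard-coded\ncomments in this file, not configurable constants:\n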
att_weight (80,196)\n prev_h (80,1024)\n att_feats (80,196,1024)\n p_att_feats (80, 196, 512)\n soft_att (80,1024)\n \"\"\"\n batch_size = att_feats.size(0)\n soft_att = soft_att.unsqueeze(1) #[80, 1, 1024]\n multiply = att_feats * soft_att.expand_as(att_feats) #[80, 196, 1024]\n multiply = torch.sum(multiply, dim=-1).squeeze(-1) #[80, 196]\n multiply = F.relu(multiply)\n multiply = multiply.view(batch_size, self.opt.att_size, self.opt.att_size)\n ############################################\n conv_outputs = []\n for group in self.predicate_modules:\n att_map = multiply.unsqueeze(1)\n att_map = group(att_map)\n conv_outputs.append(att_map)\n\n conv_outputs = torch.cat([_ for _ in conv_outputs[:]], dim=1).contiguous()\n conv_outputs = conv_outputs.view(batch_size, len(self.predicate_modules), -1) # [80, 5, 196]\n\n att_hid_size = self.opt.att_size * self.opt.att_size\n att_h = self.h2att(prev_h) # [80, 196]\n att_h = att_h.unsqueeze(1).expand_as(conv_outputs)\n dot = att_h + conv_outputs # [80, 5, 196]\n dot = F.tanh(dot)\n dot = dot.view(-1, att_hid_size)\n dot = self.alpha_net(dot) # [80, 5, 1]\n dot = dot.view(batch_size, -1) # [80, 5]\n weight = F.softmax(dot, dim=1) # [80, 1, 5] [80, 5, 196]\n att_res = torch.bmm(weight.unsqueeze(1), conv_outputs).squeeze(1) # [80, 196]\n # print(att_res.size())\n # TODO softmax will do an impact on effiency\n # TODO Add tanh in the end --- No tanh, I want to activated region saved\n att_res = F.tanh(att_res)\n att_res = F.softmax(att_res, dim=1)\n att_feats_ = att_feats.view(-1, att_hid_size, att_feats.size(-1))\n con_att = torch.bmm(att_res.unsqueeze(1), att_feats_).squeeze(1)\n # print(con_att.size())\n # print(att_res.size())\n # [80, 1024] [80,196]\n return con_att, att_res\n\n def forward__(self, att_weight, prev_h, att_feats, p_att_feats):\n \"\"\"\n Max_pooling\n KL divergence\n \"\"\"\n batch_size = att_feats.size(0)\n att_weight = att_weight.view(-1, self.opt.att_size, self.opt.att_size)\n conv_outputs = []\n for group in self.predicate_modules:\n att_map = att_weight.unsqueeze(1)\n att_map = group(att_map) # [50, 1, 7, 7]\n conv_outputs.append((att_map))\n conv_outputs = torch.cat([_ for _ in conv_outputs[:]], dim=1).contiguous()\n conv_outputs = conv_outputs.view(batch_size, len(self.predicate_modules), -1) # [80, 5, 196]\n conv_outputs = conv_outputs.transpose(1,2) #[80, 196, 5]\n #conv_outputs = self.max_pool(conv_outputs).squeeze(-1) #[80, 196]\n\n #conv_outputs = F.tanh(conv_outputs) #tanh() activation\n\n conv_outputs = torch.sum(conv_outputs, dim=-1).squeeze(-1)\n sum_outputs = (torch.sum(conv_outputs, dim=-1).unsqueeze(-1)).expand_as(conv_outputs)\n conv_outputs = conv_outputs / sum_outputs\n\n att_hid_size = self.opt.att_size * self.opt.att_size\n\n #att_res = F.tanh(conv_outputs)\n att_res = conv_outputs # [80, 196]\n\n #att_res = F.softmax(att_res, dim=1)\n att_feats_ = att_feats.view(-1, att_hid_size, att_feats.size(-1))\n con_att = torch.bmm(att_res.unsqueeze(1), att_feats_).squeeze(1)\n\n return con_att, att_res\n\n def forward_(self, att_weight, prev_h, att_feats, p_att_feats):\n batch_size = att_feats.size(0)\n att_weight = att_weight.view(-1, self.opt.att_size, self.opt.att_size)\n conv_outputs = []\n for group in self.predicate_modules:\n att_map = att_weight.unsqueeze(1)\n att_map = group(att_map) #[50, 1, 7, 7]\n conv_outputs.append((att_map))\n conv_outputs = torch.cat([_ for _ in conv_outputs[:]], dim=1).contiguous()\n conv_outputs = conv_outputs.view(batch_size, len(self.predicate_modules), -1) #[80, 5, 196]\n \"\"\"\n 
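Tensor shapes at this point in forward_ (same assumed sizes as in forward above):\n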
att_feats [80, 196, 1024]\n conv_outputs [80, 5, 196]\n p_att_feats [80, 196, 512] \n prev_h [80, 1024]\n \"\"\"\n att_hid_size = self.opt.att_size * self.opt.att_size\n att_h = self.h2att(prev_h) #[80, 196]\n att_h = att_h.unsqueeze(1).expand_as(conv_outputs)\n dot = att_h + conv_outputs #[80, 5, 196]\n dot = F.tanh(dot)\n dot = dot.view(-1, att_hid_size)\n dot = self.alpha_net(dot) #[80, 5, 1]\n dot = dot.view(batch_size, -1) #[80, 5]\n weight = F.softmax(dot, dim=1) #[80, 1, 5] [80, 5, 196]\n att_res = torch.bmm(weight.unsqueeze(1), conv_outputs).squeeze(1) #[80, 196]\n #print(att_res.size())\n #TODO softmax will do an impact on effiency\n #TODO Add tanh in the end\n att_res = F.tanh(att_res)\n att_res = F.softmax(att_res, dim=1)\n att_feats_ = att_feats.view(-1, att_hid_size, att_feats.size(-1))\n con_att = torch.bmm(att_res.unsqueeze(1), att_feats_).squeeze(1)\n #print(con_att.size())\n #print(att_res.size())\n #[80, 1024] [80,196]\n return con_att, att_res\n\nclass DecisionNetwork(nn.Module):\n def __init__(self, opt):\n super(DecisionNetwork, self).__init__()\n self.opt = opt\n self.h2att = nn.Linear(self.opt.rnn_size, self.opt.rnn_size)\n self.alpha_net = nn.Linear(self.opt.rnn_size, 1)\n\n self.init_weights()\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_normal_(p)\n\n def forward(self, soft_att, context_att, soft_weight, context_weight, prev_h):\n #[80, 2, 1024]\n att = torch.cat([soft_att.unsqueeze(1), context_att.unsqueeze(1)], dim=1)\n att_h = self.h2att(prev_h)\n att_h = att_h.unsqueeze(1).expand_as(att) # [80, 2, 1024]\n dot = att_h + att\n dot = F.tanh(dot)\n dot = dot.view(-1, self.opt.rnn_size)\n dot = self.alpha_net(dot)\n dot = dot.view(-1, 2)\n\n weight = F.softmax(dot, dim=1) #[batch, 2]\n att_res = torch.bmm(weight.unsqueeze(1), att).squeeze(1)\n\n att_weight = torch.cat([soft_weight.unsqueeze(1), context_weight.unsqueeze(1)], dim=1)\n decision_weight = torch.bmm(weight.unsqueeze(1), att_weight).squeeze(1)\n\n return att_res, weight, decision_weight\n\nclass ContextAttCore(nn.Module):\n def __init__(self, opt):\n super(ContextAttCore, self).__init__()\n self.drop_prob_lm = opt.drop_prob_lm\n\n self.attention = Attention(opt)\n\n self.context = ContextAttention(opt)\n\n self.decision = DecisionNetwork(opt)\n\n self.soft_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size, opt.rnn_size)\n\n self.lang_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size, opt.rnn_size)\n\n def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):\n prev_h = state[0][-1] # hidden state and the last layer\n\n soft_att, soft_weight = self.attention(prev_h, att_feats, p_att_feats, att_masks)\n\n soft_lstm_input = torch.cat([xt, soft_att], 1)\n\n h_soft, c_soft = self.soft_lstm(soft_lstm_input, (state[0][-1], state[1][-1]))\n\n #context_att, context_weight = self.context(soft_weight, prev_h, att_feats, p_att_feats)\n context_att, context_weight = self.context(soft_weight, h_soft, att_feats, p_att_feats, soft_att)\n\n att, decision_weight, att_weight = self.decision(soft_att, context_att, soft_weight, context_weight, h_soft)\n\n lang_lstm_input = torch.cat([xt, att], 1) # (50, 1024)\n\n # h_lang: (50, 512)\n h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][-1], state[1][-1]))\n\n output = F.dropout(h_lang, self.drop_prob_lm, self.training)\n state = (h_lang.unsqueeze(0), c_lang.unsqueeze(0))\n\n weights = {}\n weights['soft_weight'] = soft_weight\n weights['context_weight'] = context_weight\n 
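# The unblended soft/context attention maps are returned alongside the\n# blended ones so callers can visualise them or add auxiliary losses (cf. the\n# commented-out KL term just below).\n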
weights['decision_weight'] = decision_weight\n weights['att_weight'] = att_weight\n #weights['KL'] = nn.KLDivLoss()(soft_weight, context_weight)\n\n return output, state, weights\n\nclass ContextAttModel(AttModel):\n def __init__(self, opt):\n super(ContextAttModel, self).__init__(opt, fc_embed_flag=False)\n self.num_layers = 1\n self.core = ContextAttCore(opt)\n\n\n\n\n\n\n","sub_path":"models_att/ContextModel.py","file_name":"ContextModel.py","file_ext":"py","file_size_in_byte":11792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"433522835","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 6 09:45:40 2018\r\n\r\n@author: Rune\r\n\"\"\"\r\n#import json_request as a\r\nimport time\r\nimport requests\r\n#GET https://api.thingspeak.com/channels/638714/feeds.json?api_key=SHM8B3NMWV23AQHZ&results=2\r\n#channel = \"638714\"\r\n#api_key = \"SHM8B3NMWV23AQHZ\"\r\ndef urget(ch, api, rs=\"results=30\"):\r\n url = 'https://api.thingspeak.com/channels/'+ str(ch) +'/feeds.json?api_key='+ str(api) + '&' + str(rs)\r\n r = requests.get(url)\r\n print(\"Status: \", r.status_code)\r\n return r.json()\r\n\r\n#API KEY: ANJWFUJKQ85EI4NR\r\n#Channel: 633219\r\n#Field2: Data\r\n#Field3: Grader\r\n#Field4: DATO\r\nglobal lasttime\r\napi_key = \"IHKIXT6UK0F8STZC\"\r\nchannel = \"645235\"\r\ndegree = 90\r\n#url = \"https://thingspeak.com/channels/437741/feed.json\"\r\naction = 'live' # test or live\r\n#if action == 'test':\r\n# topr = a.urget(channel, api_key, \"results=130\")\r\n#if action == 'live':\r\ntopr = urget(channel, api_key, \"results=13\")\r\n\r\ndef run_s(action=action, topr=topr):\r\n if action == 'test':\r\n inputs_check = []\r\n begining = []\r\n global checklist\r\n for a in topr['feeds']:\r\n time_st = a['created_at']\r\n deg = a['field2']\r\n distance = float(a['field1'])\r\n distance = int(distance)\r\n uid = a['entry_id']\r\n if deg == '0' and uid not in begining:\r\n begining.append(uid) \r\n zero_found = 1\r\n for i in begining:\r\n if i not in checklist:\r\n checklist.append(i)\r\n for a in topr['feeds']:\r\n time_st = a['created_at']\r\n deg = a['field2']\r\n distance = float(a['field1'])\r\n distance = int(distance)\r\n uid = a['entry_id']\r\n \r\n \r\n \r\n #print(b)\r\n elif action == \"live\":\r\n newlist = []\r\n \r\n for a in topr['feeds']:\r\n \r\n newtime = a['created_at']\r\n deg = a['field2']\r\n #print(deg)\r\n distance = float(a['field1'])\r\n distance = int(distance)\r\n newlist.append(distance)\r\n return newlist, newtime\r\n# try:\r\n# if newtime == lasttime:\r\n# #return 'non'\r\n# return newlist\r\n# \r\n# else:\r\n# lasttime = newtime\r\n# return newlist \r\n# \r\n# \r\n# except:\r\n# lasttime = newtime \r\n \r\n #topr = a.urget(channel, api_key, \"results=13\")\r\n \r\n#list_n, timest = run_s()\r\n#print(timest)\r\n#print(list_n)\r\n \r\nif action == 'test':\r\n begining = run_s(action, topr)\r\n #for a in sorted(begining, reverse=True):\r\n #print(begining)\r\n for a in begining:\r\n #print(a, \"a-loop\")\r\n for b in topr['feeds']:\r\n uid = b['entry_id']\r\n #print(uid, \"b-loop\")\r\n if uid >= a and uid <= a + 12:\r\n print(b['field2'], b['field1'], b['created_at'])\r\n \r\n #print(b['field1'])\r\n #print(b['entry_id']) \r\n \r\n#topr = a.urget(channel, api_key)\r\n#print(topr)\r\n#a = 'feeds'\r\n#for a in topr.keys():\r\n# print(a)\r\n#print(topr['feeds'])\r\n#for key, value in topr.items() :\r\n # print(key, value)\r\n#print(topr('feeds'))\r\n#a.savefile(\"datapull.json\", topr)\r\n#top = 
a.openfile(\"datapull.json\")\r\n#a.cc(top)","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"45665752","text":"def collatz(number):\n if number % 2 == 0:\n return number // 2\n else:\n return number * 3 + 1\n\ntestInt = 0\nwhile testInt == 0:\n try:\n print('Please enter an integer:')\n number = int(input())\n testInt = 1\n except ValueError:\n print('You must enter an integer.')\n \nwhile number != 1:\n number = collatz(number)\n print(number)\n\n \n\n \n","sub_path":"ABS/part_1/archieve/collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"231461435","text":"from random import *\r\nfrom subprocess import call\r\nfrom os import system, name\r\nfrom time import sleep\r\nimport msvcrt\r\n\r\nFPS = 60\r\nWIDTH = 32\r\nHEIGH = 16\r\nX_INDEX = 0\r\nY_INDEX = 1\r\nsnake = [[0,0],[1,0],[2,0]]\r\nmatrix = [[]]\r\nfood_position = [5,5]\r\nvelocity = [-1,0]\r\nSCORE = 0\r\nkey = 0\r\n\r\ndef clear(): \r\n # for windows \r\n if name == 'nt': \r\n _ = system('cls') \r\n # for mac and linux(here, os.name is 'posix') \r\n else: \r\n _ = system('clear') \r\n\r\ndef startMaze():\r\n print('|',end='')\r\n for start in range(WIDTH):\r\n print('-',end='')\r\n print('|')\r\n\r\ndef endMaze():\r\n print('|',end='')\r\n for end in range(WIDTH):\r\n print('-',end='')\r\n print('|')\r\n\r\ndef drawSnake(x,y,area=1):\r\n canDraw = False\r\n for coord in snake:\r\n if(x == coord[X_INDEX] and y == coord[Y_INDEX]):\r\n canDraw = True\r\n\r\n snake_x = snake[0][X_INDEX]\r\n snake_y = snake[0][Y_INDEX]\r\n if snake_x < 0: snake_x = WIDTH\r\n if snake_y < 0: snake_y = HEIGH\r\n if snake_x > WIDTH: snake_x = 0\r\n if snake_y > HEIGH: snake_y = 0\r\n\r\n\r\n if(canDraw):\r\n if (area == 2 and snake_x == x and snake_y == y):\r\n #respawnFood()\r\n print('=',end='')\r\n else:\r\n print('*',end='')\r\n else:\r\n print(' ',end='')\r\n\r\ndef respawnFood():\r\n index_x = 0\r\n index_y = 0\r\n for i in matrix:\r\n for j in i:\r\n available = matrix[index_y][index_x]\r\n if(available == 2):\r\n matrix[index_y][index_x] = 1\r\n index_x += 1\r\n index_y += 1\r\n print('spawned food')\r\n\r\ndef createMaze():\r\n matrix.clear()\r\n startMaze()\r\n foodReady = False\r\n for row in range(HEIGH):\r\n print('|',end='')\r\n colList = []\r\n for col in range(WIDTH):\r\n drawCol = False\r\n available = 0\r\n if(drawCol):\r\n print('-',end='')\r\n available = 0\r\n else:\r\n if(random()*100 >= 50 and foodReady == False):\r\n foodReady = True\r\n print('+',end='')\r\n available = 2\r\n else:\r\n print(' ',end='')\r\n available = 1\r\n colList.append(available)\r\n print('|')\r\n matrix.append(colList)\r\n endMaze()\r\n print('end')\r\n\r\ndef redraw():\r\n startMaze()\r\n x=0\r\n y=0\r\n for row in matrix:\r\n print('|',end='')\r\n for col in row:\r\n if(col == 1 or col == 2):\r\n drawSnake(x,y,col)\r\n else:\r\n print('-',end='')\r\n x+=1\r\n x=0\r\n y+=1\r\n print('|')\r\n\r\n endMaze()\r\n\r\ndef updateVelocity():\r\n print(f'Velocity : {velocity}')\r\n print(f'Head : {snake[0]}')\r\n print(f'Score : {SCORE}')\r\n print(f'FPS : {FPS}')\r\n snake.insert(0, [snake[0][X_INDEX] + (velocity[X_INDEX]), snake[0][Y_INDEX] + velocity[Y_INDEX]])\r\n snake.pop()\r\n\r\ndef onKeyPressed():\r\n key = ord(msvcrt.getch())\r\n if(key==72): # UP\r\n velocity[0] = 0\r\n velocity[1] = 
-1\r\n if(key==80): # DOWN\r\n velocity[0] = 0\r\n velocity[1] = 1\r\n if(key==77): # RIGHT\r\n velocity[0] = 1\r\n velocity[1] = 0\r\n if(key==75): # LEFT\r\n velocity[0] = -1\r\n velocity[1] = 0\r\n \r\n\r\ncreateMaze()\r\nwhile True:\r\n clear()\r\n updateVelocity()\r\n redraw()\r\n onKeyPressed()\r\n sleep(1/FPS)","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"612379011","text":"# pacmanAgents.py\n# ---------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n#\n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\nfrom pacman import Directions\nfrom game import Agent\nfrom heuristics import *\nimport random\nimport math\n\nclass RandomAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n return;\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n # get all legal actions for pacman\n actions = state.getLegalPacmanActions()\n # returns random action from all the valide actions\n return actions[random.randint(0,len(actions)-1)]\n\nclass RandomSequenceAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n self.actionList = [];\n for i in range(0,10):\n self.actionList.append(Directions.STOP);\n return;\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n # get all legal actions for pacman\n possible = state.getAllPossibleActions();\n for i in range(0,len(self.actionList)):\n self.actionList[i] = possible[random.randint(0,len(possible)-1)];\n tempState = state;\n for i in range(0,len(self.actionList)):\n if tempState.isWin() + tempState.isLose() == 0:\n tempState = tempState.generatePacmanSuccessor(self.actionList[i]);\n else:\n break;\n # returns random action from all the valide actions\n return self.actionList[0];\n\nclass GreedyAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n return;\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n # get all legal actions for pacman\n legal = state.getLegalPacmanActions()\n # get all the successor state for these actions\n successors = [(state.generatePacmanSuccessor(action), action) for action in legal]\n # evaluate the successor states using scoreEvaluation heuristic\n scored = [(scoreEvaluation(state), action) for state, action in successors]\n # get best choice\n bestScore = max(scored)[0]\n # get all actions that lead to the highest score\n bestActions = [pair[1] for pair in scored if pair[0] == bestScore]\n # return random action from the list of the best actions\n return random.choice(bestActions)\n\n\nclass HillClimberAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n return\n\n # GetAction Function: Called with 
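onKeyPressed above calls msvcrt.getch() unconditionally, so every frame blocks until a key arrives. A sketch of a non-blocking variant (Windows only, same scan codes as above; note that arrow keys arrive as a 0 or 224 prefix byte followed by the actual code):

import msvcrt

def poll_key(velocity):
    if not msvcrt.kbhit():
        return  # nothing pending, keep the game loop running at FPS
    key = ord(msvcrt.getch())
    if key in (0, 224):  # extended-key prefix, the real code follows
        key = ord(msvcrt.getch())
    if key == 72:    # UP
        velocity[:] = [0, -1]
    elif key == 80:  # DOWN
        velocity[:] = [0, 1]
    elif key == 77:  # RIGHT
        velocity[:] = [1, 0]
    elif key == 75:  # LEFT
        velocity[:] = [-1, 0]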
every frame\n def getAction(self, state):\n self.root = state\n directions = self.root.getAllPossibleActions()\n score = 0\n flag = False\n main_action = Directions.STOP\n current = self.generateNew()\n\n while True:\n tempState = state\n for i in range(0, len(current)):\n if tempState.isWin() + tempState.isLose() == 0:\n successor = tempState.generatePacmanSuccessor(current[i])\n if successor == None:\n flag = True\n break\n tempState = successor\n else:\n break\n if flag == True:\n break\n\n if (scoreEvaluation(tempState)>score):\n score = scoreEvaluation(tempState)\n main_action = current[0]\n\n for index,action in enumerate(current):\n test = random.randint(0,100)\n if (test>50):\n current[index] = random.choice(directions)\n\n return main_action\n\n\n def generateNew(self):\n new = []\n directions = self.root.getAllPossibleActions()\n\n for i in range(0, 5):\n new.append(random.choice(directions))\n return new\n\n\nclass GeneticAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n return Directions.STOP\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n self.root = state\n\n directions = self.root.getAllPossibleActions()\n generation = []\n for i in range(0,8):\n generation.append(self.generateChromosome())\n\n new_score = []\n flag = False\n while True:\n score = []\n for child in generation:\n tempState = state\n for i in range(0,len(child)):\n\n if tempState.isWin() + tempState.isLose() == 0:\n successor = tempState.generatePacmanSuccessor(child[i])\n if successor == None:\n flag = True\n break\n tempState = successor\n else:\n break\n if flag == True:\n break\n score.append((scoreEvaluation(tempState),child))\n\n if flag == True:\n break\n score.sort(key=lambda x: x[0])\n new_score = score\n a = []\n for i in range(0,8):\n for j in range(0,i+1):\n a.append(j+1)\n\n score1,p1 = score[random.choice(a)-1]\n score2,p2 = score[random.choice(a)-1]\n\n test1 = random.randint(0,100)\n if (test1<=70):\n c1,c2 = self.crossOver(p1,p2)\n for index,child in enumerate(generation):\n if child == p1 :\n generation[index] = c1\n if child == p2:\n generation[index] = c2\n\n for child in generation:\n test3 = random.randint(0,100)\n if (test3 <= 10):\n child[random.randint(0,4)]=random.choice(directions)\n\n scores,path = new_score.pop()\n return path[0]\n\n def generateChromosome(self):\n chromosome = []\n directions = self.root.getAllPossibleActions()\n for i in range(0,5):\n chromosome.append(random.choice(directions))\n return chromosome\n\n def crossOver(self,p1,p2):\n c = []\n for i in (0,2):\n x = []\n for j in range(0,len(p1)):\n test = random.randint(0,100)\n if(test<50):\n x.append(p1[j])\n else:\n x.append(p2[j])\n c.append(x)\n return c[0],c[1]\n\n\n\nclass MCTSAgent(Agent):\n # Initialization Function: Called one time when the game starts\n\n def registerInitialState(self, state):\n return;\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n\n\n ## Tree class to make the Tree\n class Tree_Node():\n def __init__(self,action=None,parent=None):\n self.visits = 0\n self.reward = 0.0\n self.action = action\n self.children = []\n self.parent = parent\n\n ### Add a child to the given node at the given action\n def add_child(self, action):\n child = Tree_Node(action,self)\n self.children.append(child)\n\n ### get_child - to get the child of the current node from the given action\n def get_child(self,action):\n for child in self.children:\n if child.action == 
action:\n                    return child\n\n            ### Back propagation of reward and visits till the Root\n            def back_prop(node, reward):\n                while node != None:\n                    node.visits += 1\n                    node.reward += reward\n                    node = node.parent\n                return\n\n        self.root = state\n        Root = Tree_Node()\n        Root.add_child(None)\n\n        None_Flag = False\n        while True:\n            if None_Flag == True: break\n            current_node = Root\n            current_state = self.root\n            while True:\n                Start_from_root = False\n                actions = current_state.getLegalPacmanActions()\n                UCT = 0\n                chosen_action = Directions.STOP\n                for action in actions:\n                    temp_term = current_state.generatePacmanSuccessor(action)\n                    if temp_term == None:\n                        None_Flag = True\n                        break\n                    if (temp_term.isWin() + temp_term.isLose() == 0):\n                        cur = current_node.get_child(action)\n                        if cur == None:\n                            current_node.add_child(action)\n                            current_node.get_child(action).back_prop(self.Rollout(temp_term))\n                            Start_from_root = True\n                            break\n                        else:\n                            temp = self.UCT(cur)\n                            if (temp > UCT):\n                                UCT = temp\n                                chosen_action = action\n                    else:\n                        chosen_action = Directions.STOP\n                        continue\n\n                if Start_from_root == True: break\n                if None_Flag == True: break\n                current_state = current_state.generatePacmanSuccessor(chosen_action)\n                if current_state == None:\n                    None_Flag = True\n                    break\n                current_node = current_node.get_child(chosen_action)\n                if current_node == None: break\n\n        # Return the action of the most visited child of the root\n        temp = 0\n        action = Directions.STOP\n        for node in Root.children:\n            if node.visits > temp:\n                temp = node.visits\n                action = node.action\n        return action\n\n    def UCT(self, node):\n        value = node.reward / float(node.visits) + math.sqrt(2 * math.log(node.parent.visits) / float(node.visits))\n        return value\n\n    def Rollout(self,cur):\n        current = cur\n        for i in range(0, 5):\n            if (current.isLose() + current.isWin() != 0):\n                return normalizedScoreEvaluation(self.root, current)\n            else:\n                action = random.choice(current.getAllPossibleActions())\n                successor = current.generatePacmanSuccessor(action)\n                if successor == None: break\n                current = successor\n        return normalizedScoreEvaluation(self.root, current)\n\nclass BFSAgent(Agent):\n    # Initialization Function: Called one time when the game starts\n    def registerInitialState(self, state):\n        return;\n\n    # GetAction Function: Called with every frame\n    def getAction(self, state):\n        queue = []\n        flag = False\n        legal = state.getLegalPacmanActions()\n        depth = 1\n        for action in legal:\n            path = state.generatePacmanSuccessor(action)\n            queue.append((path, depth, action))\n        while queue:\n            temp = queue.pop(0)\n            current, depth, action = temp\n            legal = current.getLegalPacmanActions()\n            if (current.isWin()):\n                return action\n            if (current.isLose()):\n                continue\n            for next in legal:\n                successor = current.generatePacmanSuccessor(next)\n                if (successor == None):\n                    flag = True\n                    break\n                else:\n                    queue.append((successor, depth+1, action))\n            if(flag):\n                break\n\n        bestAction = Directions.STOP\n        scored = [(scoreEvaluation(states), depth, action) for states, depth, action in queue]\n        if (scored != None):\n            ## Finding the maximum score\n            bestScore = max(scored)[0]\n            scored1 = [(score, depth, action) for score, depth, action in scored if score == bestScore]\n            bestAction = min(scored1, key=lambda item: item[1])[2] ## Finding the action corresponding to the min depth\n        return bestAction\n\n\nclass DFSAgent(Agent):\n    # Initialization Function: Called one time when the game starts\n    def registerInitialState(self, state):\n        return;\n\n    # GetAction Function: Called with every frame\n    def getAction(self, state):\n        queue = []\n        flag = False\n        legal = state.getLegalPacmanActions()\n        depth = 1\n        for action in legal:\n            
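A small numeric check of the UCT formula used above, exploitation (mean reward) plus the exploration bonus; the visit counts here are made up for illustration:

import math

def uct(reward, visits, parent_visits):
    return reward / float(visits) + math.sqrt(2 * math.log(parent_visits) / float(visits))

# A child visited 3 times with total reward 1.8 under a parent visited 10 times
# scores 0.6 exploitation plus sqrt(2*ln(10)/3) ~= 1.239 exploration.
assert abs(uct(1.8, 3, 10) - 1.8390) < 1e-3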
path = state.generatePacmanSuccessor(action)\n queue.append((path, depth, action))\n while queue:\n temp = queue.pop()\n current, depth, action = temp\n legal = current.getLegalPacmanActions()\n if (current.isWin()):\n return action\n if (current.isLose()):\n continue\n for next in legal:\n successor = current.generatePacmanSuccessor(next)\n if (successor == None):\n flag = True\n break\n else:\n queue.append((successor, depth + 1, action))\n if (flag):\n break\n\n bestAction = Directions.STOP\n scored = [(scoreEvaluation(states), depth, action) for states, depth, action in queue]\n if (scored != None):\n ## Finding the maximum score\n bestScore = max(scored)[0]\n scored1 = [(score, depth, action) for score, depth, action in scored if score == bestScore]\n bestAction = min(scored1, key=lambda item: item[1])[2] ## Finding the action corresponding to the min depth\n return bestAction\n\n\nclass AStarAgent(Agent):\n # Initialization Function: Called one time when the game starts\n def registerInitialState(self, state):\n return;\n\n # GetAction Function: Called with every frame\n def getAction(self, state):\n flag = False # used to set the flag when None type is returned from getPacmanSuccessor\n successors = []\n legal = state.getLegalPacmanActions()\n depth = 1\n for action in legal:\n path = state.generatePacmanSuccessor(action)\n cost = depth - (scoreEvaluation(path) - scoreEvaluation(state))\n successors.append((cost, path, action, depth))\n while (successors):\n if (flag):\n break\n successors.sort()\n cost, cur, action, depth = successors.pop(0)\n if (cur.isWin()):\n return action\n if (cur.isLose()):\n continue\n legal = cur.getLegalPacmanActions()\n for next in legal:\n successor = cur.generatePacmanSuccessor(next)\n if (successor == None):\n flag = True\n break\n cost = (depth + 1) - (scoreEvaluation(successor) - scoreEvaluation(state))\n successors.append((cost, successor, action, depth + 1))\n\n # If no terminal state, return the action leading to the node with\n # the best score and no children based on the heuristic function (scoreEvaluation)\n bestAction = Directions.STOP\n scored = [(scoreEvaluation(states), depth, action) for cost, states, action, depth in successors]\n ## Finding the maximum score\n if (scored != None):\n bestScore = max(scored)[0]\n scored1 = [(score, depth, action) for score, depth, action in scored if score == bestScore]\n bestAction = min(scored1, key=lambda item: item[1])[2] ## Finding the action corresponding to the min depth\n return bestAction\n\n\n","sub_path":"pacmanAgents.py","file_name":"pacmanAgents.py","file_ext":"py","file_size_in_byte":16276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"374205969","text":"import time\n\n# this python library needs to be imported in the raspberry pi\nimport RPi.GPIO as GPIO\n\n\ndelay = 0.5\npin = None\n\n\n# Subject to change according to the pins used, therefore set generic and set by the constructor\ndef construct(out_pin):\n pin = out_pin\n # Choosing the standard board numbering\n GPIO.setmode(GPIO.BOARD)\n # setting the out pin\n GPIO.setup(out_pin, GPIO.OUT)\n\n\n# This method will let the bulb blink if the car is moving backward\ndef blink(ismovingback):\n while ismovingback:\n GPIO.output(pin, GPIO.HIGH)\n time.sleep(delay)\n GPIO.output(pin, GPIO.LOW)\n time.sleep(delay)\n 
GPIO.cleanup()\n","sub_path":"LED.py","file_name":"LED.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"86526625","text":"#!/usr/bin/env python\nfrom flask import Flask, jsonify, abort\nimport csv\n\napp = Flask(__name__)\n\nwith open('players.csv', mode='r') as infile:\n    players = csv.DictReader(infile, delimiter=',')\n    players = list(players)\n\n\n@app.route('/', methods=['GET'])\ndef api_root():\n    return 'Welcome'\n\n\n@app.route('/api/players', methods=['GET'])\ndef api_players():\n    return jsonify(players)\n\n\n@app.route('/api/player/<pid>', methods=['GET'])\ndef api_player(pid):\n    for player in players:\n        if player['playerId'] == pid:\n            return jsonify(player)\n    abort(404)\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=80)\n","sub_path":"application/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"216832609","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom workalendar.core import WesternCalendar, ChristianMixin\nfrom workalendar.registry import iso_register\n\n\n@iso_register('BG')\nclass Bulgaria(WesternCalendar, ChristianMixin):\n    name = 'Bulgaria'\n\n    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (\n        (3, 3, \"Liberation Day\"),  # Ден на Освобождението на Б\n        (5, 1, \"International Workers' Day\"),  # Ден на труда и на междунар\n        (5, 6, \"Saint George's Day\"),  # Гергьовден, ден на храброс\n        (5, 24, \"Saints Cyril & Methodius Day\"),  # Ден на българската просвет\n        (9, 6, \"Unification Day\"),  # Ден на Съединението\n        (9, 22, \"Independence Day\"),  # Ден на независимостта на Б\n        # wikipedia says Non-attendance day for schools, otherwise a working da\n        # (11, 1, \"National Awakening Day\"),  # Ден на народните будители\n\n    )\n\n    include_easter_sunday = True\n    include_easter_monday = True\n    include_christmas_eve = True  # Бъдни вечер\n    include_christmas = True  # Рождество Христово\n    include_boxing_day = True\n\n    # wikipedia says The Bulgarians have two days of Christmas,\n    # both called Christmas Day\n    boxing_day_label = \"Christmas\"\n","sub_path":"workalendar/europe/bulgaria.py","file_name":"bulgaria.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"509875380","text":"import cv2\nimport numpy as np\n\n# Constants for filtering adequate contours (represented as a fraction of the image area)\nCONTOUR_SIZE_BOUND = 1 / 40\n\n\n# Define constants for red hsv ranges\nRED_MIN_LOW = np.array([0, 100, 40], np.uint8)\nRED_MAX_LOW = np.array([8, 255, 255], np.uint8)\nRED_MIN_HIGH = np.array([172, 100, 40], np.uint8)\nRED_MAX_HIGH = np.array([180, 255, 255], np.uint8)\n\n# Define constants for yellow hsv ranges\nYELLOW_MIN = np.array([15,60, 60], np.uint8)\nYELLOW_MAX = np.array([35, 255, 255], np.uint8)\n\n# Define constants for blue hsv ranges\nBLUE_MIN = np.array([90, 120, 80], np.uint8)\nBLUE_MAX = np.array([130, 255, 255], np.uint8)\n\n\nclass Shape:\n\n    def __init__(self, frame, contour):\n        self.frame = frame\n        self.contour = contour\n\n\ndef detect(camera):\n    # Crop the frame\n    frame = crop_frame(camera.frame, camera.rect)\n\n    # Threshold the image to extract specific colours\n    red, yellow, blue = extract_colours(frame)\n\n    # Detect the contours\n    red_con, yellow_con, blue_con = detect_shape(red), detect_shape(yellow), detect_shape(blue)\n\n    # Reuse red, yellow and blue variables to draw contours on an actual image\n    red = frame.copy()\n    yellow = frame.copy()\n    blue = frame.copy()\n\n    # Draw the contours if a valid contour is present\n    if red_con is not False:\n        cv2.drawContours(red, red_con, -1, (255, 0, 255), 2)\n\n    if yellow_con is not False:\n        cv2.drawContours(yellow, yellow_con, -1, (255, 0, 255), 2)\n\n    if blue_con is not False:\n        cv2.drawContours(blue, blue_con, -1, (255, 0, 255), 2)\n\n    # Display the frames\n    # cv2.imshow(\"Red\", red)\n    # cv2.imshow(\"Yellow\", yellow)\n    # cv2.imshow(\"Blue\", blue)\n\n    # Create new Shape objects\n    red = Shape(red, red_con)\n    yellow = Shape(yellow, yellow_con)\n    blue = Shape(blue, blue_con)\n\n    # Return the frames with colour and shape recognised, as well as the contours\n    return red, yellow, blue\n\n\ndef crop_frame(frame, rect):\n    # Divide the rectangle into parts\n    y1 = rect[0][1]\n    y2 = rect[1][1]\n    x1 = rect[0][0]\n    x2 = rect[1][0]\n\n    # Crop the frame\n    frame = frame[y1:y2, x1:x2]\n\n    # Return the cropped frame\n    return frame\n\n\ndef extract_colours(frame):\n\n    # Convert the frame to hsv\n    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n    # Thresh the frame to find colours, using defined ranges\n    blue = cv2.inRange(hsv, BLUE_MIN, BLUE_MAX)\n    yellow = cv2.inRange(hsv, YELLOW_MIN, YELLOW_MAX)\n\n    # Thresh red colour using combined ranges\n    red_low = cv2.inRange(hsv, RED_MIN_LOW, RED_MAX_LOW)\n    red_high = cv2.inRange(hsv, RED_MIN_HIGH, RED_MAX_HIGH)\n    red = cv2.addWeighted(red_low, 1.0, red_high, 1.0, 0.0)\n\n    # Display the frames with colours extracted\n    # cv2.imshow(\"Red thresh\", red)\n    # cv2.imshow(\"Yellow thresh\", yellow)\n    # cv2.imshow(\"Blue thresh\", blue)\n\n    # Return new frames with extracted colours\n    return red, yellow, blue\n\n\ndef detect_shape(frame):\n    # Store the resolution\n    res = frame.shape\n\n    # Store the area of the window to use it when filtering the contours\n    area = res[0] * res[1]\n\n    # Find contours inside the frame\n    contours = cv2.findContours(frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[1]\n\n    # Initialise the contour to be displayed\n    contour = 0\n\n    # Iterate through each contour in the threshed image\n    for cnt in contours:\n\n        # Calculate the area of the shape\n        cnt_area = cv2.contourArea(cnt)\n\n        # Do not consider any areas smaller than the fraction of the image\n        if not cnt_area < area * CONTOUR_SIZE_BOUND:\n            # If important contour is empty (is int), assign a contour to the field\n            if type(contour) == int:\n                contour = cnt\n            # If important contour is a contour, then calculate the areas and put the bigger one in the field\n            elif cnt_area > cv2.contourArea(contour):\n                contour = cnt\n\n    # Return the contour if a valid one was detected, otherwise return False\n    if type(contour) != int:\n        return contour\n    else:\n        return False\n","sub_path":"surface-master/vision_software/shape_recognition.py","file_name":"shape_recognition.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
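extract_colours above thresholds red with two HSV ranges because red hue wraps around 180 in OpenCV. A self-contained check of just that trick; cv2.bitwise_or merges the two binary masks with the same effect as the addWeighted call:

import cv2
import numpy as np

# A 2x2 BGR test image: left column pure red, right column pure blue.
img = np.array([[[0, 0, 255], [255, 0, 0]],
                [[0, 0, 255], [255, 0, 0]]], np.uint8)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
low = cv2.inRange(hsv, np.array([0, 100, 40], np.uint8), np.array([8, 255, 255], np.uint8))
high = cv2.inRange(hsv, np.array([172, 100, 40], np.uint8), np.array([180, 255, 255], np.uint8))
red_mask = cv2.bitwise_or(low, high)
assert red_mask[0, 0] == 255 and red_mask[0, 1] == 0  # red kept, blue rejected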
{"seq_id":"485473633","text":"from collections import defaultdict\n\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\n\nfrom utils import save_pretrained, get_loader\nfrom vocab import Vocab\n\nBOS_TOKEN = \"<bos>\"\nEOS_TOKEN = \"<eos>\"\nPAD_TOKEN = \"<pad>\"\n\n\ndef load_reuters():\n    from nltk.corpus import reuters\n    text = reuters.sents()\n    text = [[word.lower() for word in sentence] for sentence in text]\n    vocab = Vocab.build(text, reserved_tokens=[BOS_TOKEN, EOS_TOKEN, PAD_TOKEN])\n    corpus = [vocab.convert_tokens_to_idx(sentence) for sentence in text]\n    return corpus, vocab\n\n\nclass GloveDataset(Dataset):\n    def __init__(self, corpus, vocab, context_size=2):\n        self.cooccur_counts = defaultdict(float)\n        self.bos = vocab[BOS_TOKEN]\n        self.eos = vocab[EOS_TOKEN]\n        for sentence in tqdm(corpus, desc=\"Dataset Construction\"):\n            sentence = [self.bos] + sentence + [self.eos]\n            for i in range(1, len(sentence) - 1):\n                w = sentence[i]\n                left_contexts = sentence[max(0, i - context_size): i]\n                right_contexts = sentence[i + 1: min(len(sentence), i + context_size) + 1]\n                for k, c in enumerate(left_contexts[::-1]):\n                    self.cooccur_counts[(w, c)] += 1 / (k + 1)\n                for k, c in enumerate(right_contexts):\n                    self.cooccur_counts[(w, c)] += 1 / (k + 1)\n        self.data = [(w, c, count) for (w, c), count in self.cooccur_counts.items()]\n\n    def __len__(self):\n        return len(self.data)\n\n    def __getitem__(self, item):\n        return self.data[item]\n\n    def collate_fn(self, examples):\n        words = torch.tensor([ex[0] for ex in examples])\n        contexts = torch.tensor([ex[1] for ex in examples])\n        counts = torch.tensor([ex[2] for ex in examples])\n        return (words, contexts, counts)\n\n\nclass GloveModel(nn.Module):\n    def __init__(self, vocab_size, embedding_dim):\n        super(GloveModel, self).__init__()\n        self.w_embeddings = nn.Embedding(vocab_size, embedding_dim=embedding_dim)\n        self.w_biases = nn.Embedding(vocab_size, 1)\n\n        self.c_embeddings = nn.Embedding(vocab_size, embedding_dim=embedding_dim)\n        self.c_biases = nn.Embedding(vocab_size, 1)\n\n    def forward_w(self, words):\n        w_embeds = self.w_embeddings(words)\n        w_biases = self.w_biases(words)\n        return w_embeds, w_biases\n\n    def forward_c(self, contexts):\n        c_embeds = self.c_embeddings(contexts)\n        c_biases = self.c_biases(contexts)\n        return c_embeds, c_biases\n\n\nembedding_dim = 128\nhidden_dim = 256\nbatch_size = 1024\ncontext_size = 3\nnum_epoch = 10\nm_max = 100\nalpha = 0.75\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ncorpus, vocab = load_reuters()\ndataset = GloveDataset(corpus, vocab, context_size=context_size)\ndataloader = get_loader(dataset, batch_size)\n\nmodel = GloveModel(len(vocab), embedding_dim)\nmodel.to(device)\noptimizer = optim.Adam(model.parameters(), lr=0.001)\n\nmodel.train()\nfor epoch in range(num_epoch):\n    total_loss = 0\n    for batch in tqdm(dataloader, desc=f\"Training Epoch {epoch}\"):\n        words, contexts, counts = [x.to(device) for x in batch]\n        word_embeds, word_biases = model.forward_w(words)\n        context_embeds, context_biases = model.forward_c(contexts)\n        log_counts = torch.log(counts)\n\n        weight_factor = torch.clamp(torch.pow(counts / m_max, alpha), max=1.0)\n        optimizer.zero_grad()\n\n        # Squeeze the (B, 1) biases to (B,) so the loss stays one value per pair\n        loss = (torch.sum(word_embeds * context_embeds, dim=1) + word_biases.squeeze(-1) + context_biases.squeeze(-1) - log_counts) ** 2\n        wavg_loss = (weight_factor * loss).mean()\n        wavg_loss.backward()\n        optimizer.step()\n        total_loss += wavg_loss.item()\n\n    print(f\"Loss: {total_loss:.2f}\")\n\ncombined_embeds = model.w_embeddings.weight + model.c_embeddings.weight\nsave_pretrained(vocab, combined_embeds.data, \"glove.vec\")\n","sub_path":"nlp/pre-train-model/ptm-5-3.py","file_name":"ptm-5-3.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
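The weight_factor line in the training loop above is the GloVe weighting function f(x) = min((x / x_max)^alpha, 1) with x_max = m_max = 100 and alpha = 0.75; a quick numeric check:

import torch

counts = torch.tensor([1.0, 50.0, 100.0, 400.0])
weight = torch.clamp(torch.pow(counts / 100, 0.75), max=1.0)
# (1/100)**0.75 ~= 0.0316 and (50/100)**0.75 ~= 0.5946; counts >= 100 cap at 1.
assert float(weight[0]) < 0.04 and float(weight[2]) == 1.0 and float(weight[3]) == 1.0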
{"seq_id":"614736848","text":"from __future__ import division;\nimport scipy.special as sp;\nimport numpy as np;\nimport matplotlib.pyplot as pl;\n\ndef mvn_sample(mu, k, nsamples):\n    x = np.random.multivariate_normal(mu,k,nsamples);\n    return x.T;\n#---Kernel Functions\ndef kConstant(x,y): #Constant\n    v = 2; #Variance\n    return v;\ndef kLinear(x,y): #Linear\n    v = 0.01; #Variance\n    return v*(x*y);\ndef kBrownian(x,y): #Brownian Motion\n    v = 1.5; #Variance\n    return v*min(x,y);\ndef kSq_Exp(x,y): #Squared Exponential (aka Gaussian Kernel)\n    v, l = 2, 5; #Variance & Characteristic Length Scale\n    return (v**2) * np.exp(-(x-y)**2/(2.0*l**2));\ndef kOrn_Uhl(x,y): #Ornstein-Uhlenbeck (aka Laplace Kernel)\n    v, l = 2, 5; #Variance & Characteristic Length Scale\n    return (v**2) * np.exp(-np.abs(x-y)/l);\ndef kPeriodic(x,y): #Periodic\n    v, l = 2, 5; #Variance & Characteristic Length Scale\n    return (v**2) * np.exp(-(2*np.sin((x-y)/2)**2)/l**2);\ndef kSymmetric(x,y): #Symmetric\n    v = 2; #Variance\n    return (v**2) * np.exp(-min(abs(x-y),abs(x+y))**2);\ndef kGaussian_Noise(x,y): #Gaussian Noise\n    stdev = 1; #Standard Deviation\n    kron_delta = 1 if x==y else 0;\n    return stdev*kron_delta;\ndef kMatern(x,y): #Matern\n    v, b, l = 1, 6, 1; #Variance, Parameter Beta & Characteristic Length Scale\n    k1 = 2**(1-b)/sp.gamma(b);\n    dl = (np.sqrt(2*b)*abs(x-y))/l;\n    if dl==0: return v**2; #The Matern kernel tends to the variance as x approaches y\n    k2 = (dl**b)*sp.kn(b,dl);\n    return (v**2)*k1*k2;\ndef kRationalQuadratic(x,y): #Rational Quadratic\n    v, a = 2, 1; #Variance & Parameter Alpha\n    return (v**2)*(1+abs(x-y)**2)**-a;\n
#---Mean Functions\ndef mZero(x):\n    return 0;\ndef mLinear(x):\n    return x*2;\ndef mAbsolute(x):\n    return abs(x)*10;\ndef mSquared(x):\n    return x**2;\ndef mCube(x):\n    return x**3;\ndef mAsymptote(x):\n    return 1.0/x;\ndef mSin(x):\n    return np.sin(x);\ndef mDiscrete(x):\n    return int(x)*2;\n\ndef getKernel_PostPred(nOpc, t, aData):\n    x = aData[0];\n    y = aData[1]; # @UnusedVariable\n    k_11 = getKernel(nOpc, x); #Kernel of Data\n    k_12 = getKernel(nOpc, t, x); #Kernel of Data-Queries\n    k_22 = getKernel(nOpc, t); #Kernel of Queries\n    invK_11 = np.linalg.pinv(k_11);\n    prod_k = np.dot(k_12, invK_11);\n    k_out = k_22 - np.dot(prod_k, k_12.T);\n    return k_out;\n\ndef getMean_PostPred(nOpc, t, aData):\n    x = aData[0];\n    y = aData[1];\n    mu_t = getMean(nOpc, t).reshape(-1,1);\n    mu_x = getMean(nOpc, x).reshape(-1,1);\n    k_11 = getKernel(nOpc, x); #Kernel of Data\n    k_12 = getKernel(nOpc, t, x); #Kernel of Data-Queries\n    invK_11 = np.linalg.pinv(k_11);\n    prod_k = np.dot(k_12, invK_11);\n    mu_out = mu_t + np.dot(prod_k, (y - mu_x));\n    return mu_out;\n\n\ndef getKernel(nOpc, t, t2=None):\n    kFunc = {0 : kConstant,\n             1 : kLinear,\n             2 : kBrownian,\n             3 : kSq_Exp,\n             4 : kOrn_Uhl,\n             5 : kPeriodic,\n             6 : kSymmetric,\n             7 : kGaussian_Noise,\n             8 : kMatern,\n             9 : kRationalQuadratic};\n    n = len(t);\n    n2 = len(t2) if t2 is not None else n;\n    t2 = t2 if t2 is not None else t;\n    k = np.zeros((n,n2));\n    for i in range(n):\n        for j in range(n2):\n            k[i,j] = kFunc[nOpc](t[i], t2[j]);\n    return k;\n\ndef getMean(nOpc, t):\n    mFunc = {0 : mZero,\n             1 : mLinear,\n             2 : mAbsolute,\n             3 : mSquared,\n             4 : mCube,\n             5 : mAsymptote,\n             6 : mSin,\n             7 : mDiscrete};\n    n = len(t);\n    mu = np.zeros((1,n))[0];\n    for i in range(n):\n        mu[i] = mFunc[nOpc](t[i]);\n    return mu;\n\ndef plotGP(t, k, mu, aSamples):\n    s = aSamples;\n    var = k.diagonal();\n    pl.gca().fill_between(t.flat, mu-var, mu+var, color=\"#dddddd\"); #Variance\n    pl.plot(t,mu, 'r'); #Mean\n    for i in range(nSamples):\n        pl.plot(t,s[:,i].T, marker='o', ms=5); #Function Samples\n    nPad = 5;\n    pl.axis([min(t)-nPad, max(t)+nPad, np.min(s)-nPad, np.max(s)+nPad]);\n    pl.show();\n\n\nif __name__ == 
'__main__':\n nSamples = 1;\n t = np.linspace(0,10,50).reshape(-1,1); #Queries x*\n x = np.array([1,3,6]).reshape(-1,1); #Data x\n y = np.array([1,-1,8]).reshape(-1,1); #Data y [or f(x)]\n aData = np.array([x,y]);\n mu = getMean_PostPred(2, t, aData);\n k = getKernel_PostPred(2, t, aData);\n mu = mu.reshape(1,-1)[0];\n s = mvn_sample(mu, k, nSamples);\n pl.plot(x,y, marker='*', ms=20);\n plotGP(t, k, mu, s);\n \n \n# mu = getMean(0, t);\n# k = getKernel(5, t);\n# s = mvn_sample(mu, k, nSamples);\n# plotGP(t, k, mu, s);\n \n ","sub_path":"sandbox/alfredo/Test_GP/Sampler_GP_PostPred.py","file_name":"Sampler_GP_PostPred.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"7067232","text":"\"\"\"Models for Linnworks API response data.\"\"\"\n\nimport datetime as dt\nfrom typing import Any\n\nimport pytz\n\n\ndef parse_date_time(date_time_string: str) -> dt.datetime:\n \"\"\"Return a date time string as datetime.datetime.\"\"\"\n numeric_string = date_time_string.replace(\"Z\", \"\")\n date, time = numeric_string.split(\"T\")\n year, month, day = date.split(\"-\")\n if \".\" in time:\n time, microsecond = time.split(\".\")\n microsecond = microsecond[:6]\n else:\n microsecond = \"0\"\n hour, minute, second = time.split(\":\")\n try:\n date_time = dt.datetime(\n year=int(year),\n month=int(month),\n day=int(day),\n hour=int(hour),\n minute=int(minute),\n second=int(second),\n microsecond=int(microsecond),\n )\n except ValueError:\n raise ValueError(f'Error parsing datestring \"{date_time_string}\".')\n date_time.replace(tzinfo=pytz.utc)\n return date_time\n\n\nclass StockLevelInfo:\n \"\"\"Model for product stock level information.\"\"\"\n\n def __init__(self, stock_level_data: dict[str, Any]):\n \"\"\"Model for product stock level information.\"\"\"\n self.raw = stock_level_data\n self.auto_adjust = stock_level_data[\"AutoAdjust\"]\n self.available = stock_level_data[\"Available\"]\n self.due = stock_level_data[\"Due\"]\n self.in_order_book = stock_level_data[\"InOrderBook\"]\n self.in_orders = stock_level_data[\"InOrders\"]\n self.jit = stock_level_data[\"JIT\"]\n self.last_update_date = parse_date_time(stock_level_data[\"LastUpdateDate\"])\n self.last_update_operation = stock_level_data[\"LastUpdateOperation\"]\n\n self.location_is_fulfillment_center = stock_level_data[\"Location\"][\n \"IsFulfillmentCenter\"\n ]\n self.location_is_warehouse_managed = stock_level_data[\"Location\"].get(\n \"IsWarehouseManaged\"\n )\n self.location_name = stock_level_data[\"Location\"][\"LocationName\"]\n self.location_id = stock_level_data[\"Location\"][\"StockLocationId\"]\n self.location_int_id = stock_level_data[\"Location\"][\"StockLocationIntId\"]\n\n self.minimum_level = stock_level_data[\"MinimumLevel\"]\n self.pending_update = stock_level_data[\"PendingUpdate\"]\n self.sku = stock_level_data[\"SKU\"]\n self.stock_item_id = stock_level_data[\"StockItemId\"]\n self.stock_item_int_id = stock_level_data[\"StockItemIntId\"]\n self.stock_item_purchase_price = stock_level_data[\"StockItemPurchasePrice\"]\n self.stock_level = stock_level_data[\"StockLevel\"]\n self.stock_value = stock_level_data[\"StockValue\"]\n self.unit_cost = stock_level_data[\"UnitCost\"]\n self.row_id = stock_level_data[\"rowid\"]\n\n\nclass InventoryItemImage:\n \"\"\"Model for inventory image information.\"\"\"\n\n def __init__(self, inventory_image: dict[str, Any]):\n \"\"\"Model for inventory image information.\"\"\"\n self.raw = 
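getMean_PostPred and getKernel_PostPred above are the standard Gaussian-process posterior predictive equations, mu* = mu_t + K12 K11^-1 (y - mu_x) and K* = K22 - K12 K11^-1 K12^T. A compact NumPy restatement under a zero prior mean, reusing the squared-exponential form and the x, y, t values from the __main__ block:

import numpy as np

def rbf(a, b, v=2.0, l=5.0):
    # Same squared-exponential kernel as kSq_Exp, vectorised over column vectors
    return (v ** 2) * np.exp(-(a - b.T) ** 2 / (2.0 * l ** 2))

x = np.array([[1.0], [3.0], [6.0]])
y = np.array([[1.0], [-1.0], [8.0]])
t = np.linspace(0, 10, 50).reshape(-1, 1)
K11, K12, K22 = rbf(x, x), rbf(t, x), rbf(t, t)
w = np.dot(K12, np.linalg.pinv(K11))
mu_star = np.dot(w, y)           # posterior predictive mean (zero prior mean)
K_star = K22 - np.dot(w, K12.T)  # posterior predictive covariance
assert mu_star.shape == (50, 1) and K_star.shape == (50, 50)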
inventory_image\n self.stock_item_id: str = inventory_image[\"StockItemId\"]\n self.image_id: str = inventory_image[\"ImageId\"]\n self.image_url: str = inventory_image[\"ImageUrl\"]\n self.image_thumbnail_url: str = inventory_image[\"ImageThumbnailUrl\"]\n\n\nclass StockItemImage:\n \"\"\"Model for stock item image information.\"\"\"\n\n def __init__(self, stock_item_image: dict[str, Any]):\n \"\"\"Model for stock item image information.\"\"\"\n self.raw = stock_item_image\n self.source: str = stock_item_image[\"Source\"]\n self.full_source: str = stock_item_image[\"FullSource\"]\n self.checksum_value: str = stock_item_image[\"CheckSumValue\"]\n self.image_id: str = stock_item_image[\"pkRowId\"]\n self.is_main: bool = stock_item_image[\"IsMain\"]\n self.sort_order: int = stock_item_image[\"SortOrder\"]\n self.stock_item_id: str = stock_item_image[\"StockItemId\"]\n self.stock_item_int_id: int = stock_item_image[\"StockItemIntId\"]\n\n\nclass ProcessedOrder:\n \"\"\"Model for processed orders.\"\"\"\n\n def __init__(self, processed_order: dict[str, Any]):\n \"\"\"Model for processed orders.\"\"\"\n self.raw = processed_order\n self.order_guid: str = processed_order[\"pkOrderID\"]\n self.received_date: dt.datetime = parse_date_time(\n processed_order[\"dReceivedDate\"]\n )\n self.processed_at: dt.datetime = parse_date_time(\n processed_order[\"dProcessedOn\"]\n )\n self.time_diff = float(processed_order[\"timeDiff\"])\n self.postage_cost = float(processed_order[\"fPostageCost\"])\n self.total_charge = float(processed_order[\"fTotalCharge\"])\n self.postage_cost_ex_tax = float(processed_order[\"PostageCostExTax\"])\n self.subtotal = float(processed_order[\"Subtotal\"])\n self.tax = float(processed_order[\"fTax\"])\n self.total_discount = float(processed_order[\"TotalDiscount\"])\n self.profit_margin = float(processed_order[\"ProfitMargin\"])\n self.country_tax_rate = float(processed_order[\"CountryTaxRate\"])\n self.order_id = str(processed_order[\"nOrderId\"])\n self.status_number = int(processed_order[\"nStatus\"])\n self.currency: str = processed_order[\"cCurrency\"]\n self.tracking_number: str = processed_order[\"PostalTrackingNumber\"]\n self.country: str = processed_order[\"cCountry\"]\n self.source: str = processed_order[\"Source\"]\n self.subsource: str = processed_order[\"SubSource\"]\n self.postal_service: str = processed_order[\"PostalServiceName\"]\n self.reference_number: str = processed_order[\"ReferenceNum\"]\n self.secondary_reference: str = processed_order[\"SecondaryReference\"]\n self.external_reference: str = processed_order[\"ExternalReference\"]\n self.address_1: str = processed_order[\"Address1\"]\n self.address_2: str = processed_order[\"Address2\"]\n self.address_3: str = processed_order[\"Address3\"]\n self.town: str = processed_order[\"Town\"]\n self.region: str = processed_order[\"Region\"]\n self.buyer_phone_number: str = processed_order[\"BuyerPhoneNumber\"]\n self.company: str = processed_order[\"Company\"]\n self.channel_buyer_name: str = processed_order[\"ChannelBuyerName\"]\n self.account_name: str = processed_order[\"AccountName\"]\n self.customer_full_name: str = processed_order[\"cFullName\"]\n self.customer_email_address: str = processed_order[\"cEmailAddress\"]\n self.customer_post_code: str = processed_order[\"cPostCode\"]\n self.paid_at: dt.datetime = parse_date_time(processed_order[\"dPaidOn\"])\n self.cancelled_at: dt.datetime | None = parse_date_time(\n processed_order[\"dCancelledOn\"]\n )\n if self.cancelled_at.year == 1:\n self.cancelled_at = 
None\n self.item_weight = int(processed_order[\"ItemWeight\"])\n self.total_weight = int(processed_order[\"TotalWeight\"])\n self.hold_or_cancel: bool = processed_order[\"HoldOrCancel\"]\n self.is_resend: bool = processed_order[\"IsResend\"]\n self.is_exchange: bool = processed_order[\"IsExchange\"]\n self.tax_id: str = processed_order[\"TaxId\"]\n self.fulfilment_location_name: str = processed_order[\"FulfilmentLocationName\"]\n\n\nclass OrderAuditTrailEntry:\n \"\"\"Model for order audit trail entries.\"\"\"\n\n def __init__(self, audit_trail_entry: dict[str, Any]):\n \"\"\"Model for order audit trail entries.\"\"\"\n self.raw = audit_trail_entry\n self.history_id: int = audit_trail_entry[\"sid_history\"]\n self.order_guid: str = audit_trail_entry[\"fkOrderId\"]\n self.history_note: str = audit_trail_entry[\"HistoryNote\"]\n self.timestamp: dt.datetime = parse_date_time(audit_trail_entry[\"DateStamp\"])\n self.tag: str = audit_trail_entry[\"Tag\"]\n self.updated_by = audit_trail_entry[\"UpdatedBy\"]\n self.audit_type = audit_trail_entry[\"fkOrderHistoryTypeId\"]\n self.type_description: str = audit_trail_entry[\"TypeDescription\"]\n\n\nclass StockItemHistoryRecord:\n \"\"\"Model for stock item history records.\"\"\"\n\n def __init__(self, stock_item_record: dict[str, Any]):\n \"\"\"Model for stock item history records.\"\"\"\n self.raw = stock_item_record\n self.timestamp = parse_date_time(stock_item_record[\"Date\"])\n self.stock_level = stock_item_record[\"Level\"]\n self.text = stock_item_record[\"Note\"]\n self.relative_change = stock_item_record[\"ChangeQty\"]\n self.stock_item_id = stock_item_record[\"StockItemId\"]\n\n\nclass ChannelLinkedItem:\n \"\"\"Model for channel linked items.\"\"\"\n\n def __init__(self, channel_linked_item: dict[str, Any]):\n \"\"\"Model for channel linked items.\"\"\"\n self.raw = channel_linked_item\n self.channel_sku_id = channel_linked_item[\"ChannelSKURowId\"]\n self.sku = channel_linked_item[\"SKU\"]\n self.source = channel_linked_item[\"Source\"]\n self.sub_source = channel_linked_item[\"SubSource\"]\n self.update_status = channel_linked_item[\"UpdateStatus\"]\n self.channel_reference_id = channel_linked_item[\"ChannelReferenceId\"]\n self.last_update = parse_date_time(channel_linked_item[\"LastUpdate\"])\n self.max_listed_quantity = channel_linked_item[\"MaxListedQuantity\"]\n self.end_when_stock = channel_linked_item[\"EndWhenStock\"]\n self.submitted_quantity = channel_linked_item[\"SubmittedQuantity\"]\n self.listed_quantity = channel_linked_item[\"ListedQuantity\"]\n self.stock_percentage = channel_linked_item[\"StockPercentage\"]\n self.ignore_sync = channel_linked_item[\"IgnoreSync\"]\n self.is_multi_location = channel_linked_item[\"IsMultiLocation\"]\n self.stock_item_id = channel_linked_item[\"StockItemId\"]\n","sub_path":"linnapi/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"287803095","text":"# how to add items in our list\r\n# most common & most important thing you can do with your list.\r\n# append() func is used to add the data in our list.\r\n\r\n# fruits = ['grapes','apple']\r\n# fruits.append('mango') #append() method always add the data in the end of the list.\r\n# print(fruits)\r\n\r\n#In real life programs, mostly we have empty list but we mostly used append method to add items continously.\r\nfruits = 
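parse_date_time above splits the ISO-8601 string by hand; a sketch of the same conversion via datetime.strptime, assuming the 'YYYY-MM-DDTHH:MM:SS[.fffffff]Z' shapes the manual parser accepts:

import datetime as dt

def parse_date_time_strptime(s):
    s = s.replace('Z', '')
    if '.' in s:
        head, frac = s.split('.')
        s = head + '.' + frac[:6]  # trim to microseconds, like the manual parser
        return dt.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
    return dt.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')

assert parse_date_time_strptime('2021-03-04T05:06:07.1234567Z') == dt.datetime(2021, 3, 4, 5, 6, 7, 123456)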
[]\r\nfruits.append('apple')\r\nfruits.append('mango')\r\nfruits.append('grapes')\r\nfruits.append('banana')\r\nprint(fruits)\r\n\r\n","sub_path":"append_data_in_list.py","file_name":"append_data_in_list.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"324431371","text":"# -*- coding: utf-8 -*-\nimport os, requests\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"sgds_admin.settings\")\n\nfrom django.core.files import File\nfrom django.core.files.temp import NamedTemporaryFile\n\nfrom app.models import Material, Nutrient, Nrv, Recipe, Step\nfrom app.macros import MATERIAL_TYPE_DICT\n\n\nclass SavePipeline(object):\n    def _get_img_file(self, image_url):\n        image_content = requests.get(image_url).content\n        img_temp = NamedTemporaryFile(delete=True)\n        img_temp.write(image_content)\n        img_temp.flush()\n        return File(img_temp)\n\n    def _save_fruit(self, data):\n        fruits = Material.objects.filter(name=data['name'])\n        if fruits:\n            fruit = fruits[0]\n        else:\n            fruit = Material(name=data['name'],\n                             type=MATERIAL_TYPE_DICT['fruit'],\n                             intro=data['intro'],\n                             efficacy=data['efficacy'],\n                             choice=data['choice'],\n                             store=data['store'])\n            img_file = self._get_img_file(data['image'])\n            fruit.image.save('temp.jpg', img_file, save=True)\n            fruit.save()\n        return fruit\n\n    def _save_nutrient(self, key):\n        nutrients = Nutrient.objects.filter(name=key)\n        if nutrients:\n            nutrient = nutrients[0]\n        else:\n            nutrient = Nutrient(name=key)\n            nutrient.save()\n        return nutrient\n\n    def _save_nrv(self, m, n, v):\n        nrvs = Nrv.objects.filter(material=m, nutrient=n)\n        if not nrvs:\n            nrv = Nrv(material=m, nutrient=n, value=v)\n            nrv.save()\n\n    def _save_default(self, name):\n        defaults = Material.objects.filter(name=name)\n        if defaults:\n            default = defaults[0]\n        else:\n            default = Material(name=name,\n                               type=MATERIAL_TYPE_DICT['default'])\n            default.save()\n        return default\n\n    def _save_recipe(self, data, ingres):\n        recipe = Recipe(title=data['title'],\n                        description=data['description'],\n                        tips=data['tips'],\n                        source_url=data['source_url'],\n                        cooked_count=data['count'],\n                        create_time=data['date'])\n        img_file = self._get_img_file(data['image_url'])\n        recipe.image.save('temp.jpg', img_file, save=True)\n        recipe.save()\n        recipe.ingredients.add(*ingres)\n        return recipe\n\n    def _save_step(self, data, recipe):\n        number = 1\n        for description, image_url in zip(data['step_descriptions'], data['step_image_urls']):\n            step = Step(recipe=recipe, description=description, order=number)\n            img_file = self._get_img_file(image_url)\n            step.image.save('temp.jpg', img_file, save=True)\n            step.save()\n            number += 1\n\n    def process_item(self, item, spider):\n        data = item['data']\n        if spider.name == 'meishij.fruit':\n            fruit = self._save_fruit(data)\n            for key, value in data['nrv'].items():\n                nutrient = self._save_nutrient(key)\n                self._save_nrv(fruit, nutrient, value)\n        elif spider.name == 'meishij.recipe':\n            ingres = []\n            for ingredient in data['ingredients']:\n                ingre = self._save_default(ingredient)\n                ingres.append(ingre)\n            recipe = self._save_recipe(data, ingres)\n            self._save_step(data, recipe)\n","sub_path":"crawlers/meishij/meishij/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"69824869","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Wei, Shuowen\n\nhttps://leetcode.com/problems/keys-and-rooms/\n\n\"\"\"\nclass 
Solution(object):\n def canVisitAllRooms(self, rooms):\n \"\"\"\n :type rooms: List[List[int]]\n :rtype: bool\n \"\"\"\n entered = [False]*len(rooms)\n entered[0] = True\n stack = [0]\n while len(stack) > 0 : \n openRoom = stack.pop()\n for k in rooms[openRoom]:\n if not entered[k]:\n entered[k] = True\n stack.append(k)\n return entered.count(True) == len(rooms)","sub_path":"Medium/LC841KeysAndRooms.py","file_name":"LC841KeysAndRooms.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"445758451","text":"class Solution:\n def rob(self, nums: List[int]) -> int:\n '''\n DP\n '''\n if len(nums) == 1: return nums[0]\n \n prevMax1, prevMax2, currMax1, currMax2 = 0, 0, 0, 0\n \n for i, n in enumerate(nums):\n if 0 <= i < len(nums) - 1:\n temp = currMax1\n currMax1 = max(prevMax1 + n, currMax1)\n prevMax1 = temp\n if 1 <= i < len(nums):\n temp = currMax2\n currMax2 = max(prevMax2 + n, currMax2)\n prevMax2 = temp\n \n return max(currMax1, currMax2)\n \n \n # TC: O(n)\n \n # SC: O(1)\n \n # HINT: comparing to House Robber I, \"the problem becomes to rob either House[1]-House[n-1] \n # or House[2]-House[n], depending on which choice offers more money.\n # BUT NOW YOU NEED TO TAKE CARE OF THE CORNER CASE AS i IS CHECKED WITHIN FOR LOOP\n \n # ref: https://leetcode.com/problems/house-robber-ii/solution/\n","sub_path":"213_HouseRobberII/213_HouseRobberII.py","file_name":"213_HouseRobberII.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"558158493","text":"from services.BureauActif.libbureauactif.db.Base import db, BaseModel\nimport datetime\nimport os\n\n\nclass BureauActifTimelineDayEntry(db.Model, BaseModel):\n __tablename__ = \"ba_timeline_day_entry\"\n id_timeline_day_entry = db.Column(db.Integer, db.Sequence('id_timeline_day_entry_sequence'), primary_key=True,\n autoincrement=True)\n id_timeline_day = db.Column(db.Integer, db.ForeignKey('ba_timeline_day.id_timeline_day', ondelete='cascade'),\n nullable=True)\n id_timeline_entry_type = db.Column(db.Integer,\n db.ForeignKey('ba_timeline_entry_type.id_timeline_entry_type',\n ondelete='cascade'),\n nullable=True)\n value = db.Column(db.Float, nullable=False)\n start_time = db.Column(db.TIMESTAMP(timezone=True), nullable=True)\n end_time = db.Column(db.TIMESTAMP(timezone=True), nullable=True)\n entry_type = db.relationship('BureauActifTimelineEntryType')\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n ignore_fields.append('entry_type')\n\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def get_timeline_day_entries(timeline_day_id: int):\n return BureauActifTimelineDayEntry.query.filter_by(id_timeline_day=timeline_day_id).order_by(\n BureauActifTimelineDayEntry.id_timeline_day_entry).all()\n\n @classmethod\n def insert(cls, new_entry):\n super().insert(new_entry)\n db.session.commit()\n return new_entry\n\n @staticmethod\n def update_entry(id_entry, value):\n BureauActifTimelineDayEntry.query.filter_by(id_timeline_day_entry=id_entry).update(dict(value=value))\n db.session.commit()\n\n @classmethod\n def update(cls, id_entry, value):\n super().update(id_entry, dict(value=value))\n\n @staticmethod\n def create_defaults():\n first_day1 = BureauActifTimelineDayEntry()\n first_day1.id_timeline_entry_type = 1\n first_day1.value = 7.8\n first_day1.id_timeline_day = 1\n 
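A quick trace of the rob method above: on a circular street the first and last house are adjacent, so it effectively solves the linear problem twice, once without the last house and once without the first, and keeps the larger answer. The same idea as a standalone helper:

def rob_circular(nums):
    if len(nums) == 1:
        return nums[0]
    def line(sub):
        prev, curr = 0, 0
        for n in sub:
            prev, curr = curr, max(prev + n, curr)
        return curr
    return max(line(nums[:-1]), line(nums[1:]))

assert rob_circular([2, 3, 2]) == 3   # both 2s would be adjacent on the circle
assert rob_circular([1, 2, 3, 1]) == 4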
db.session.add(first_day1)\n\n first_day2 = BureauActifTimelineDayEntry()\n first_day2.id_timeline_entry_type = 2\n first_day2.value = 0.5\n first_day2.id_timeline_day = 1\n db.session.add(first_day2)\n\n first_day3 = BureauActifTimelineDayEntry()\n first_day3.id_timeline_entry_type = 5\n first_day3.value = 0.5\n first_day3.id_timeline_day = 1\n db.session.add(first_day3)\n\n first_day4 = BureauActifTimelineDayEntry()\n first_day4.id_timeline_entry_type = 3\n first_day4.value = 0.1\n first_day4.id_timeline_day = 1\n db.session.add(first_day4)\n\n first_day5 = BureauActifTimelineDayEntry()\n first_day5.id_timeline_entry_type = 4\n first_day5.value = 0.02\n first_day5.id_timeline_day = 1\n db.session.add(first_day5)\n\n first_day6 = BureauActifTimelineDayEntry()\n first_day6.id_timeline_entry_type = 2\n first_day6.value = 0.4\n first_day6.id_timeline_day = 1\n db.session.add(first_day6)\n\n first_day7 = BureauActifTimelineDayEntry()\n first_day7.id_timeline_entry_type = 3\n first_day7.value = 0.5\n first_day7.id_timeline_day = 1\n db.session.add(first_day7)\n\n first_day8 = BureauActifTimelineDayEntry()\n first_day8.id_timeline_entry_type = 5\n first_day8.value = 0.5\n first_day8.id_timeline_day = 1\n db.session.add(first_day8)\n\n first_day9 = BureauActifTimelineDayEntry()\n first_day9.id_timeline_entry_type = 3\n first_day9.value = 0.5\n first_day9.id_timeline_day = 1\n db.session.add(first_day9)\n\n first_day10 = BureauActifTimelineDayEntry()\n first_day10.id_timeline_entry_type = 5\n first_day10.value = 0.5\n first_day10.id_timeline_day = 1\n db.session.add(first_day10)\n\n first_day11 = BureauActifTimelineDayEntry()\n first_day11.id_timeline_entry_type = 6\n first_day11.value = 0.5\n first_day11.id_timeline_day = 1\n db.session.add(first_day11)\n\n first_day12 = BureauActifTimelineDayEntry()\n first_day12.id_timeline_entry_type = 2\n first_day12.value = 0.4\n first_day12.id_timeline_day = 1\n db.session.add(first_day12)\n\n first_day13 = BureauActifTimelineDayEntry()\n first_day13.id_timeline_entry_type = 4\n first_day13.value = 0.1\n first_day13.id_timeline_day = 1\n db.session.add(first_day13)\n\n first_day14 = BureauActifTimelineDayEntry()\n first_day14.id_timeline_entry_type = 3\n first_day14.value = 0.5\n first_day14.id_timeline_day = 1\n db.session.add(first_day14)\n\n first_day15 = BureauActifTimelineDayEntry()\n first_day15.id_timeline_entry_type = 5\n first_day15.value = 0.5\n first_day15.id_timeline_day = 1\n db.session.add(first_day15)\n\n first_day16 = BureauActifTimelineDayEntry()\n first_day16.id_timeline_entry_type = 3\n first_day16.value = 0.5\n first_day16.id_timeline_day = 1\n db.session.add(first_day16)\n\n first_day17 = BureauActifTimelineDayEntry()\n first_day17.id_timeline_entry_type = 5\n first_day17.value = 0.5\n first_day17.id_timeline_day = 1\n db.session.add(first_day17)\n\n first_day18 = BureauActifTimelineDayEntry()\n first_day18.id_timeline_entry_type = 6\n first_day18.value = 0.5\n first_day18.id_timeline_day = 1\n db.session.add(first_day18)\n\n first_day19 = BureauActifTimelineDayEntry()\n first_day19.id_timeline_entry_type = 5\n first_day19.value = 0.5\n first_day19.id_timeline_day = 1\n db.session.add(first_day19)\n\n first_day20 = BureauActifTimelineDayEntry()\n first_day20.id_timeline_entry_type = 2\n first_day20.value = 0.4\n first_day20.id_timeline_day = 1\n db.session.add(first_day20)\n\n first_day21 = BureauActifTimelineDayEntry()\n first_day21.id_timeline_entry_type = 5\n first_day21.value = 0.1\n first_day21.id_timeline_day = 1\n 
db.session.add(first_day21)\n\n first_day22 = BureauActifTimelineDayEntry()\n first_day22.id_timeline_entry_type = 3\n first_day22.value = 0.5\n first_day22.id_timeline_day = 1\n db.session.add(first_day22)\n\n second_day1 = BureauActifTimelineDayEntry()\n second_day1.id_timeline_entry_type = 1\n second_day1.value = 8\n second_day1.id_timeline_day = 2\n db.session.add(second_day1)\n\n second_day2 = BureauActifTimelineDayEntry()\n second_day2.id_timeline_entry_type = 3\n second_day2.value = 0.5\n second_day2.id_timeline_day = 2\n db.session.add(second_day2)\n\n second_day3 = BureauActifTimelineDayEntry()\n second_day3.id_timeline_entry_type = 2\n second_day3.value = 0.5\n second_day3.id_timeline_day = 2\n db.session.add(second_day3)\n\n second_day4 = BureauActifTimelineDayEntry()\n second_day4.id_timeline_entry_type = 3\n second_day4.value = 0.5\n second_day4.id_timeline_day = 2\n db.session.add(second_day4)\n\n second_day5 = BureauActifTimelineDayEntry()\n second_day5.id_timeline_entry_type = 2\n second_day5.value = 0.5\n second_day5.id_timeline_day = 2\n db.session.add(second_day5)\n\n second_day6 = BureauActifTimelineDayEntry()\n second_day6.id_timeline_entry_type = 4\n second_day6.value = 0.2\n second_day6.id_timeline_day = 2\n db.session.add(second_day6)\n\n second_day7 = BureauActifTimelineDayEntry()\n second_day7.id_timeline_entry_type = 3\n second_day7.value = 0.5\n second_day7.id_timeline_day = 2\n db.session.add(second_day7)\n\n second_day8 = BureauActifTimelineDayEntry()\n second_day8.id_timeline_entry_type = 2\n second_day8.value = 0.5\n second_day8.id_timeline_day = 2\n db.session.add(second_day8)\n\n second_day9 = BureauActifTimelineDayEntry()\n second_day9.id_timeline_entry_type = 3\n second_day9.value = 0.5\n second_day9.id_timeline_day = 2\n db.session.add(second_day9)\n\n second_day10 = BureauActifTimelineDayEntry()\n second_day10.id_timeline_entry_type = 5\n second_day10.value = 0.5\n second_day10.id_timeline_day = 2\n db.session.add(second_day10)\n\n second_day11 = BureauActifTimelineDayEntry()\n second_day11.id_timeline_entry_type = 3\n second_day11.value = 0.5\n second_day11.id_timeline_day = 2\n db.session.add(second_day11)\n\n second_day12 = BureauActifTimelineDayEntry()\n second_day12.id_timeline_entry_type = 2\n second_day12.value = 0.5\n second_day12.id_timeline_day = 2\n db.session.add(second_day12)\n\n second_day13 = BureauActifTimelineDayEntry()\n second_day13.id_timeline_entry_type = 3\n second_day13.value = 0.5\n second_day13.id_timeline_day = 2\n db.session.add(second_day13)\n\n second_day14 = BureauActifTimelineDayEntry()\n second_day14.id_timeline_entry_type = 4\n second_day14.value = 0.87\n second_day14.id_timeline_day = 2\n db.session.add(second_day14)\n\n second_day15 = BureauActifTimelineDayEntry()\n second_day15.id_timeline_entry_type = 3\n second_day15.value = 0.5\n second_day15.id_timeline_day = 2\n db.session.add(second_day15)\n\n second_day16 = BureauActifTimelineDayEntry()\n second_day16.id_timeline_entry_type = 5\n second_day16.value = 0.5\n second_day16.id_timeline_day = 2\n db.session.add(second_day16)\n\n second_day17 = BureauActifTimelineDayEntry()\n second_day17.id_timeline_entry_type = 3\n second_day17.value = 0.5\n second_day17.id_timeline_day = 2\n db.session.add(second_day17)\n\n third_day1 = BureauActifTimelineDayEntry()\n third_day1.id_timeline_entry_type = 1\n third_day1.value = 7.45\n third_day1.id_timeline_day = 3\n db.session.add(third_day1)\n\n third_day2 = BureauActifTimelineDayEntry()\n third_day2.id_timeline_entry_type = 3\n 
third_day2.value = 0.5\n third_day2.id_timeline_day = 3\n db.session.add(third_day2)\n\n third_day3 = BureauActifTimelineDayEntry()\n third_day3.id_timeline_entry_type = 5\n third_day3.value = 0.5\n third_day3.id_timeline_day = 3\n db.session.add(third_day3)\n\n third_day4 = BureauActifTimelineDayEntry()\n third_day4.id_timeline_entry_type = 3\n third_day4.value = 0.5\n third_day4.id_timeline_day = 3\n db.session.add(third_day4)\n\n third_day5 = BureauActifTimelineDayEntry()\n third_day5.id_timeline_entry_type = 2\n third_day5.value = 0.1\n third_day5.id_timeline_day = 3\n db.session.add(third_day5)\n\n third_day6 = BureauActifTimelineDayEntry()\n third_day6.id_timeline_entry_type = 5\n third_day6.value = 0.4\n third_day6.id_timeline_day = 3\n db.session.add(third_day6)\n\n third_day7 = BureauActifTimelineDayEntry()\n third_day7.id_timeline_entry_type = 3\n third_day7.value = 0.5\n third_day7.id_timeline_day = 3\n db.session.add(third_day7)\n\n third_day8 = BureauActifTimelineDayEntry()\n third_day8.id_timeline_entry_type = 5\n third_day8.value = 0.5\n third_day8.id_timeline_day = 3\n db.session.add(third_day8)\n\n third_day9 = BureauActifTimelineDayEntry()\n third_day9.id_timeline_entry_type = 3\n third_day9.value = 0.5\n third_day9.id_timeline_day = 3\n db.session.add(third_day9)\n\n third_day10 = BureauActifTimelineDayEntry()\n third_day10.id_timeline_entry_type = 4\n third_day10.value = 0.7\n third_day10.id_timeline_day = 3\n db.session.add(third_day10)\n\n third_day11 = BureauActifTimelineDayEntry()\n third_day11.id_timeline_entry_type = 6\n third_day11.value = 0.5\n third_day11.id_timeline_day = 3\n db.session.add(third_day11)\n\n third_day12 = BureauActifTimelineDayEntry()\n third_day12.id_timeline_entry_type = 5\n third_day12.value = 0.5\n third_day12.id_timeline_day = 3\n db.session.add(third_day12)\n\n third_day13 = BureauActifTimelineDayEntry()\n third_day13.id_timeline_entry_type = 6\n third_day13.value = 0.1\n third_day13.id_timeline_day = 3\n db.session.add(third_day13)\n\n third_day14 = BureauActifTimelineDayEntry()\n third_day14.id_timeline_entry_type = 5\n third_day14.value = 0.5\n third_day14.id_timeline_day = 3\n db.session.add(third_day14)\n\n third_day15 = BureauActifTimelineDayEntry()\n third_day15.id_timeline_entry_type = 3\n third_day15.value = 0.5\n third_day15.id_timeline_day = 3\n db.session.add(third_day15)\n\n third_day16 = BureauActifTimelineDayEntry()\n third_day16.id_timeline_entry_type = 2\n third_day16.value = 0.3\n third_day16.id_timeline_day = 3\n db.session.add(third_day16)\n\n third_day17 = BureauActifTimelineDayEntry()\n third_day17.id_timeline_entry_type = 5\n third_day17.value = 0.5\n third_day17.id_timeline_day = 3\n db.session.add(third_day17)\n\n third_day18 = BureauActifTimelineDayEntry()\n third_day18.id_timeline_entry_type = 3\n third_day18.value = 0.2\n third_day18.id_timeline_day = 3\n db.session.add(third_day18)\n\n third_day19 = BureauActifTimelineDayEntry()\n third_day19.id_timeline_entry_type = 4\n third_day19.value = 0.2\n third_day19.id_timeline_day = 3\n db.session.add(third_day19)\n\n third_day20 = BureauActifTimelineDayEntry()\n third_day20.id_timeline_entry_type = 3\n third_day20.value = 0.3\n third_day20.id_timeline_day = 3\n db.session.add(third_day20)\n\n 
db.session.commit()\n","sub_path":"teraserver/python/services/BureauActif/libbureauactif/db/models/BureauActifTimelineDayEntry.py","file_name":"BureauActifTimelineDayEntry.py","file_ext":"py","file_size_in_byte":14278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"412841750","text":"#!/usr/bin/env python\n#\n# Copyright 2014 The BCE Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the license.txt file.\n#\n\nimport bce.locale.lang_id as _langid\nimport bce.locale.translation as _tr\nimport sys as _std_sys\nimport sympy as _sympy\nimport json as _std_json\nimport bce.option as _bce_opt\nimport bce.logic.main as _bce_main\nimport bce.logic.error as _le\nimport bce.parser.common.error as _pe\nimport bce.base.version as _bce_ver\n\n\ndef check_user_typed_ce(expr):\n \"\"\"Check whether the expression user typed contains no invalid character.\n\n :param expr: The user typed expression.\n :return: Return True if the expression contains no invalid character.\n \"\"\"\n\n # Valid characters table.\n valid_ch = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ()[]{}<>;+-*/.0123456789_=^\"\n\n # Check each character in the expression.\n for ch in expr:\n if valid_ch.find(ch) == -1:\n return False\n\n return True\n\n\ndef read_user_abbreviation_dictionary(file_path):\n \"\"\"Read user abbreviation dictionary from a external JSON file.\n\n :param file_path: The path of the JSON file.\n :return: Parsed user abbreviation dictionary data.\n \"\"\"\n\n # Read file data.\n fp = open(file_path, \"r\")\n file_data = fp.read()\n fp.close()\n\n # Parse JSON.\n parsed_data = _std_json.loads(file_data)\n\n # Safe check.\n if not isinstance(parsed_data, dict):\n return None\n\n for abbr in parsed_data:\n # Get atom dictionary.\n atom_dict = parsed_data[abbr]\n\n # Safe check.\n if not isinstance(abbr, str):\n return None\n\n for atom in atom_dict:\n # Safe check.\n if not isinstance(atom, str):\n return None\n\n # Get the origin atom count.\n atom_count = atom_dict[atom]\n\n # Safe check.\n if (not isinstance(atom_count, int)) and (not isinstance(atom_count, float)):\n return None\n\n # Convert the origin atom count to SymPy internal presentation.\n atom_dict[atom] = _sympy.Rational(atom_count, 1)\n\n # Re-write the atom dictionary to the UA dictionary.\n parsed_data[abbr] = atom_dict\n\n return parsed_data\n\n\ndef run(ua_dict_file=None, enable_auto_correct=True, quiet=False):\n \"\"\"Run BCE interactive shell.\n\n :param ua_dict_file: The path of user abbreviation dictionary file.\n :param enable_auto_correct: Turn on auto-correct function if this parameter is True.\n :param quiet: Don't print extra messages if this parameter is True.\n :return: Exit code of the interactive shell (0 = Success).\n \"\"\"\n\n # Get BCE version.\n bce_ver = _bce_ver.get_version()\n\n # Check python version.\n use_raw_input = False\n py_ver = _std_sys.version_info\n py_major_ver = py_ver[0]\n py_minor_ver = py_ver[1]\n\n if py_major_ver != 2 and py_major_ver != 3:\n if not quiet:\n print(_tr.get_translated_string(_langid.LANG_ID_IAS_E_PYV))\n\n return 1\n\n if py_major_ver == 2:\n if py_minor_ver < 7:\n if not quiet:\n print(_tr.get_translated_string(_langid.LANG_ID_IAS_E_PYV))\n\n return 1\n\n # Use raw_input() routine if the runtime is Python 2.7.x.\n use_raw_input = True\n\n # Generate default BCE options.\n options = _bce_opt.Option()\n\n # Read UA dictionary.\n if not ua_dict_file is None:\n try:\n 
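create_defaults above repeats the same four assignments for every entry; an equivalent data-driven sketch, reusing the model and db session already imported in that file (only the first rows of day 1 are shown, the full tables would carry all the literals above):

def add_entries(day_id, rows):
    # rows: list of (id_timeline_entry_type, value) pairs for one day
    for entry_type, value in rows:
        entry = BureauActifTimelineDayEntry()
        entry.id_timeline_entry_type = entry_type
        entry.value = value
        entry.id_timeline_day = day_id
        db.session.add(entry)

add_entries(1, [(1, 7.8), (2, 0.5), (5, 0.5), (3, 0.1), (4, 0.02)])
db.session.commit()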
ua_dict = read_user_abbreviation_dictionary(ua_dict_file)\n        except FileNotFoundError:\n            if not quiet:\n                print(_tr.get_translated_string(_langid.LANG_ID_IAS_E_UA_FILE_NOT_FOUND))\n\n            return 1\n        except PermissionError:\n            if not quiet:\n                print(_tr.get_translated_string(_langid.LANG_ID_IAS_E_UA_NO_PERMISSION))\n\n            return 1\n        except ValueError:\n            if not quiet:\n                print(_tr.get_translated_string(_langid.LANG_ID_IAS_E_INCORRECT_SYNTAX))\n\n            return 1\n\n        # Enable the UA dictionary. Auto-correct is configured separately below\n        # from the enable_auto_correct parameter.\n        options.set_user_abbreviation_dictionary(ua_dict)\n    else:\n        # Disable the UA dictionary.\n        options.disable_user_abbreviation_dictionary()\n\n    if enable_auto_correct:\n        # Enable the auto-correct function.\n        options.enable_auto_correct()\n    else:\n        # Disable the auto-correct function.\n        options.disable_auto_correct()\n\n    # Get whether we're in a TTY.\n    if _std_sys.stdin.isatty() and not quiet:\n        prompt_str = \">>> \"\n\n        # Print the banner.\n        if not quiet:\n            print(_tr.get_translated_string(_langid.LANG_ID_IAS_BANNER, {\"$1\": str(bce_ver[0]),\n                                                                         \"$2\": str(bce_ver[1]),\n                                                                         \"$3\": str(bce_ver[2])}))\n    else:\n        prompt_str = \"\"\n\n    while True:\n        try:\n            # Read input.\n            if use_raw_input:\n                typed = raw_input(prompt_str)\n            else:\n                typed = input(prompt_str)\n\n            typed = typed.strip()\n\n            # Ignore this line if it's empty or just a comment line.\n            if len(typed) == 0 or typed[0] == \"#\":\n                continue\n\n            # Exit the shell if user typed \"exit\".\n            if typed == \"exit\":\n                break\n\n            # Check the expression.\n            if not check_user_typed_ce(typed):\n                print(_tr.get_translated_string(_langid.LANG_ID_IAS_E_IV_CH))\n                continue\n\n            # Balance the CE and print the result.\n            try:\n                print(_bce_main.auto_balance_chemical_equation(typed, options))\n            except _le.LogicError as err:\n                print(err.to_string())\n            except _pe.ParserError as err:\n                print(err.to_string())\n        except EOFError:\n            break\n\n    return 0","sub_path":"bce/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":6047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"20558857","text":"import asyncio\nimport io\nfrom io import BytesIO\n\nimport aiohttp\nimport async_timeout\nimport numpy as np\nimport uvloop\nfrom aiohttp import web\nfrom aiohttp.web import HTTPBadRequest, HTTPNotFound, HTTPUnsupportedMediaType\n\nfrom classify_nsfw import caffe_preprocess_and_compute, load_model\n\n\nnsfw_net, caffe_transformer = load_model()\n\n\ndef classify(image: bytes) -> np.float64:\n    scores = caffe_preprocess_and_compute(\n        image, caffe_transformer=caffe_transformer, caffe_net=nsfw_net, output_layers=[\"prob\"])\n    return scores[1]\n\n\nasync def fetch(session, url):\n    with async_timeout.timeout(10):\n        async with session.get(url) as response:\n            if response.status == 404:\n                raise HTTPNotFound()\n            return await response.read()\n\n\nclass API(web.View):\n    async def post(self):\n        request = self.request\n        print(request)\n        data = await request.post()\n        try:\n            if 'url' in data.keys():\n                image = await fetch(session, data[\"url\"])\n            elif 'file' in data.keys():\n                image = data['file'].file.read()\n            else:\n                # neither parameter supplied; route to the KeyError handler below\n                raise KeyError('url')\n            nsfw_prob = classify(image)\n            text = nsfw_prob.astype(str)\n            try:\n                
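# float() raises ValueError on non-numeric text; -101 serves as an\n                # \"invalid score\" sentinel checked in the finally block below\n                # (assumption: valid scores from the prob layer lie in [0, 1])\n                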
inter = float(text)\n            except ValueError:\n                inter = -101\n            finally:\n                if inter < 0:\n                    return web.json_response({'status': False, 'reason': 'model error'})\n                else:\n                    return web.json_response({'status': True, 'score': inter})\n        except KeyError:\n            # return HTTPBadRequest(text=\"Missing `url` POST parameter\")\n            return web.json_response({'status': False, 'reason': 'Missing `url` POST parameter'})\n        except OSError as e:\n            if \"cannot identify\" in str(e):\n                return web.json_response({'status': False, 'reason': 'Invalid image'})\n            else:\n                raise e\n\n\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\nsession = aiohttp.ClientSession()\napp = web.Application()\napp.router.add_route(\"*\", \"/\", API)\nweb.run_app(app)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"206211528","text":"# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def removeNthFromEnd(self, head, n):\n        \"\"\"\n        :type head: ListNode\n        :type n: int\n        :rtype: ListNode\n        \"\"\"\n        \n        pre_head = ListNode(0)\n        pre_head.next = head\n        \n        count = 0\n        node_1 = pre_head\n        while count < n:\n            node_1 = node_1.next\n            count += 1\n        \n        node_2 = pre_head\n        pre_node_2 = None\n        \n        while node_1:\n            node_1 = node_1.next\n            pre_node_2 = node_2\n            node_2 = node_2.next\n        \n        pre_node_2.next = node_2.next\n        \n        return pre_head.next\n","sub_path":"19-Remove-Nth-Node-From-End-of-List/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"31580984","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef saveBasicPlot(data, path, name):\n\n\tfig = plt.figure()\n\tplt.plot(data.index, data.Close)\n\tplt.xlabel(\"Date\")\n\tplt.xticks(rotation=45, ha='right', fontsize=10)\n\tplt.ylabel(\"Stock Price\")\n\tplt.title(\"Trend over Given Range\")\n\tfig.subplots_adjust(bottom = 0.2)\n\tfig.savefig(path + '/' + name)\n\treturn fig\n\n\ndef saveReturnsPlot(returns_data, path, name):\n\n\tfig = plt.figure()\n\tplt.plot(returns_data.index, returns_data)\n\tplt.xlabel(\"Date\")\n\tplt.xticks(rotation=45, ha='right', fontsize=10)\n\tplt.ylabel(\"Returns\")\n\tplt.title(\"Returns Trend over Given Range\")\n\tfig.subplots_adjust(bottom = 0.2)\n\tfig.savefig(path + '/' + name)\n\treturn fig","sub_path":"stock_modeling/quotes/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"327253106","text":"import random\nimport os\nfrom draw_dice import Draw_dice\nclass Player():\n    def __init__(self,goal):\n        self.score = 0\n        self.can_to_throw = False\n        self.tmp_sum = 0\n        self.goal = goal\n    def is_success(self):\n        return self.score+self.tmp_sum >=self.goal\n    def throw(self):\n        if self.can_to_throw :\n            i = random.randint(1,6)\n            if i == 1:\n                self.can_to_throw = False\n                self.tmp_sum = 0\n                Draw_dice(i)\n                print(\"---------------***Player rolled a 1***---------------\")\n                print(\"---------------***Player's banked score this game: %d***---------------\" % (self.score))\n                print(\"---------------***Player's score this turn: 0***---------------\")\n                if self.is_success():\n                    return 3\n                self.stop()\n                return 0\n            else:\n                self.tmp_sum = self.tmp_sum + i\n                Draw_dice(i)\n                print(\"---------------***Player rolled a %d***---------------\" %(i))\n                print(\"---------------***Player's banked score this game: %d***---------------\" % (self.score))\n                print(\"---------------***Player's score this turn: %d***---------------\" % (self.tmp_sum))\n                if self.is_success():\n                    return 3\n                return 1\n        else:\n            print(\"It is another player's turn, please wait\")\n            raise Exception()\n    def stop(self):\n        if self.can_to_throw:\n            self.score = self.score + self.tmp_sum\n        else:\n            
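# the turn was busted: the accumulated points are forfeited and the\n            # banked score is left untouched\n            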
pass\n\nif __name__ == '__main__':\n    a = Player(goal=20)  # Player() requires a goal; 20 is an arbitrary test value\n    a.can_to_throw = True\n    while a.throw():\n        print(a.tmp_sum)","sub_path":"pig dice/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"507077588","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nfrom speedCalculator import *\nfrom regChecker import *\nfrom fileWriter import *\nfrom ticketGenerator import *\n\ndef parse(response):\n    result = response.split(\",\")\n    result.append(avgspeed(result[2], result[3]))\n    result.append(speeding(result[6], result[4]))\n    result.append(registration(result[1]))\n    if result[8] == \"False\":\n        csvwriter(result, \"wrongreg\")\n    if result[7] == \"True\":\n        csvwriter(result, \"wrongspd\")\n    findvalue(result[5])\n    result[6] = str(round(float(result[6])))","sub_path":"CA/Async2/mainFile.py","file_name":"mainFile.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"351376914","text":"\r\nfrom libraries import read_files_functions, pixel_shapes_functions, pixel_functions, image_functions, same_shapes_functions\r\n\r\n\r\nfrom PIL import ImageTk, Image\r\nimport os, sys\r\nimport pickle\r\nimport copy\r\nimport math\r\nimport shutil\r\n\r\nfrom libraries.cv_globals import top_shapes_dir, top_images_dir, internal, scnd_stg_all_files, frth_smallest_pixc, third_smallest_pixc\r\n\r\ndirectory = sys.argv[1]\r\nshapes_type = \"intnl_spixcShp\"\r\n\r\n\r\nif directory != \"\" and directory[-1] != '/':\r\n    directory += '/'\r\n\r\nacross_all_files_ddir = top_shapes_dir + directory + scnd_stg_all_files + \"/data/\"\r\nacross_all_files_dfile = across_all_files_ddir + \"all_files.data\"\r\nwith open (across_all_files_dfile, 'rb') as fp:\r\n    # {'10.11': {('79935', '58671'), ('39441', '39842'), ('45331', '36516')}, '11.12': {('39842', '40243'), ('26336', '27137'), ... }, ... }\r\n    acrs_all_files_shapes = pickle.load(fp)\r\nfp.close()\r\n\r\n\r\nall_matches_so_far_dfile = across_all_files_ddir + \"all_matches.data\"\r\nif os.path.exists( all_matches_so_far_dfile ):\r\n    with open (all_matches_so_far_dfile, 'rb') as fp:\r\n        all_matches_so_far = pickle.load(fp)\r\n    fp.close()\r\n\r\nelse:\r\n    all_matches_so_far = acrs_all_files_shapes\r\n\r\n\r\nif shapes_type == \"normal\":\r\n    print(\"ERROR. 
shapes_type normal is not supported\")\r\n sys.exit()\r\n\r\nelif shapes_type == \"intnl_spixcShp\":\r\n s_pixcShp_intnl_dir = top_shapes_dir + directory + \"spixc_shapes/\" + internal + \"/\"\r\n \r\n shapes_dir = s_pixcShp_intnl_dir + \"shapes/\"\r\n\r\n\r\nelse:\r\n print(\"ERROR at \" + str( os.path.basename(__file__) ) + \" shapes_type \" + shapes_type + \" is not supported\")\r\n sys.exit()\r\n\r\n\r\ndef check_shape_size( param_im1shapeid, param_im2shapeid, param_shapes, param_ano_shapes ):\r\n # check if size is too different\r\n im1shapes_total = len( param_shapes[param_im1shapeid] )\r\n im2shapes_total = len( param_ano_shapes[param_im2shapeid] )\r\n \r\n im1_2pixels_diff = abs( im1shapes_total - im2shapes_total )\r\n if im1_2pixels_diff != 0:\r\n if im1shapes_total > im2shapes_total:\r\n if im1_2pixels_diff / im2shapes_total > 1:\r\n return True\r\n else:\r\n if im1_2pixels_diff / im1shapes_total > 1:\r\n return True\r\n\r\n return False \r\n\r\n\r\n\r\nresult_matches = {}\r\nref_imagefile_op = False\r\nim_width = None\r\nim_height = None\r\nim_size = None\r\nfor each_files in all_matches_so_far:\r\n print(each_files)\r\n\r\n result_matches[each_files] = set()\r\n\r\n cur_im1file = each_files.split(\".\")[0]\r\n cur_im2file = each_files.split(\".\")[1]\r\n\r\n if ref_imagefile_op is False:\r\n im1 = Image.open(top_images_dir + directory + cur_im1file + \".png\" )\r\n im_size = im1.size\r\n im_width, im_height = im_size\r\n \r\n ref_imagefile_op = True\r\n\r\n\r\n shapes_dfile = shapes_dir + cur_im1file + \"shapes.data\"\r\n with open (shapes_dfile, 'rb') as fp:\r\n # { '79999': ['79999', ... ], ... }\r\n # { 'shapeid': [ pixel indexes ], ... }\r\n im1shapes = pickle.load(fp)\r\n fp.close() \r\n\r\n im1shapes_in_shape_coord = {}\r\n im1shapes_by_pindex = {}\r\n im1shapes_boundaries = {}\r\n for shapeid in im1shapes:\r\n cur_pixels = set()\r\n \r\n for temp_p in im1shapes[shapeid]:\r\n im1shapes_by_pindex[ temp_p ] = shapeid\r\n xy = pixel_functions.convert_pindex_to_xy( temp_p, im_width )\r\n \r\n cur_pixels.add( xy )\r\n \r\n im1shapes[shapeid] = cur_pixels \r\n im1shapes_in_shape_coord[shapeid] = pixel_shapes_functions.get_shape_pos_in_shp_coord( im1shapes[ shapeid ], im_width, param_shp_type=1 ) \r\n im1shapes_boundaries[shapeid] = pixel_shapes_functions.get_boundary_pixels( cur_pixels ) \r\n\r\n\r\n im2shapes_dfile = shapes_dir + cur_im2file + \"shapes.data\"\r\n with open (im2shapes_dfile, 'rb') as fp:\r\n # { '79999': ['79999', ... ], ... }\r\n # { 'shapeid': [ pixel indexes ], ... 
}\r\n        im2shapes = pickle.load(fp)\r\n    fp.close()\r\n\r\n    im2shapes_boundaries = {}\r\n    im2shapes_in_shape_coord = {}\r\n    im2shapes_by_pindex = {}\r\n    for shapeid in im2shapes:\r\n        cur_pixels = set()\r\n\r\n        for temp_p in im2shapes[shapeid]:\r\n            im2shapes_by_pindex[temp_p] = shapeid\r\n            xy = pixel_functions.convert_pindex_to_xy( temp_p, im_width )\r\n\r\n            cur_pixels.add( xy )\r\n\r\n        im2shapes[shapeid] = cur_pixels\r\n        im2shapes_in_shape_coord[shapeid] = pixel_shapes_functions.get_shape_pos_in_shp_coord( im2shapes[ shapeid ], im_width, param_shp_type=1 )\r\n        im2shapes_boundaries[shapeid] = pixel_shapes_functions.get_boundary_pixels( cur_pixels )\r\n\r\n\r\n    im1shape_neighbors_file = s_pixcShp_intnl_dir + \"shape_nbrs/\" + cur_im1file + \"_shape_nbrs.txt\"\r\n    im2shape_neighbors_file = s_pixcShp_intnl_dir + \"shape_nbrs/\" + cur_im2file + \"_shape_nbrs.txt\"\r\n\r\n    im1shapes_neighbors = read_files_functions.rd_dict_k_v_l(cur_im1file, directory, im1shape_neighbors_file)\r\n    im2shapes_neighbors = read_files_functions.rd_dict_k_v_l(cur_im2file, directory, im2shape_neighbors_file)\r\n\r\n    im1shapes_colors = pixel_shapes_functions.get_all_shapes_colors(cur_im1file, directory, shapes_type=shapes_type, min_colors=True)\r\n    im2shapes_colors = pixel_shapes_functions.get_all_shapes_colors(cur_im2file, directory, shapes_type=shapes_type, min_colors=True)\r\n\r\n\r\n    for each_shape in acrs_all_files_shapes[each_files]:\r\n        # each_shape -> ('79935', '58671')\r\n\r\n        for im1nbr in im1shapes_neighbors[ each_shape[0] ]:\r\n            if len( im1shapes[ im1nbr ] ) < frth_smallest_pixc:\r\n                continue\r\n\r\n            im1nbr_already_matched = [ temp_shapes for temp_shapes in acrs_all_files_shapes[each_files] if temp_shapes[0] == im1nbr ]\r\n            if len( im1nbr_already_matched ) >= 1:\r\n                continue\r\n\r\n            # im1nbr not found\r\n            for im2nbr in im2shapes_neighbors[ each_shape[1] ]:\r\n                if len( im2shapes[ im2nbr ] ) < frth_smallest_pixc:\r\n                    continue\r\n\r\n                if im1shapes_colors[ im1nbr ] != im2shapes_colors[im2nbr]:\r\n                    continue\r\n\r\n                size_too_diff = check_shape_size( im1nbr, im2nbr, im1shapes, im2shapes )\r\n                if size_too_diff is True:\r\n                    continue\r\n\r\n                result = pixel_shapes_functions.check_shape_attached_near( im1shapes_boundaries[ each_shape[0] ], im1shapes_boundaries[im1nbr], \r\n                                    im2shapes_boundaries[ each_shape[1] ], im2shapes_boundaries[im2nbr], im1.size )\r\n                if result is not True:\r\n                    continue\r\n\r\n                im1nbr_shp_coord_xy = im1shapes_in_shape_coord[ im1nbr ]\r\n                im2nbr_shp_coord_xy = im2shapes_in_shape_coord[ im2nbr ]\r\n\r\n                # matching from smaller shape: pass the smaller shape's\r\n                # coordinates first (assumed argument order for\r\n                # match_shape_while_moving_it), so both neighbours take part\r\n                # in the comparison.\r\n                if len( im1shapes[ im1nbr ] ) > len( im2shapes[ im2nbr ] ):\r\n                    im1im2nbr_match = same_shapes_functions.match_shape_while_moving_it( im2nbr_shp_coord_xy, im1nbr_shp_coord_xy, match_threshold=0.6 )\r\n                else:\r\n                    im1im2nbr_match = same_shapes_functions.match_shape_while_moving_it( im1nbr_shp_coord_xy, im2nbr_shp_coord_xy, match_threshold=0.6 )\r\n\r\n                if im1im2nbr_match is not True:\r\n                    continue\r\n\r\n                result_matches[each_files].add( ( im1nbr, im2nbr ) )\r\n\r\n\r\nresult_dfile = across_all_files_ddir + \"nbr_matches.data\"\r\nif os.path.exists(result_dfile):\r\n    with open (result_dfile, 'rb') as fp:\r\n        result_shapes = pickle.load(fp)\r\n    fp.close()\r\n\r\n    for each_files in result_shapes:\r\n        if each_files in result_matches.keys():\r\n            for each_shapes in result_shapes[each_files]:\r\n                if each_shapes not in result_matches[each_files]:\r\n                    result_matches[each_files].add( each_shapes )\r\n\r\n        else:\r\n            
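# this file pair only appeared in a previous run; carry its matches over unchanged\r\n            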
result_matches[each_files] = result_shapes[each_files]\r\n\r\n\r\nwith open(result_dfile, 'wb') as fp:\r\n    pickle.dump(result_matches, fp)\r\nfp.close()\r\n","sub_path":"algorithms/scnd_stage/find_nbr_matches_from_Nfnd_shapes.py","file_name":"find_nbr_matches_from_Nfnd_shapes.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"599153269","text":"import os\nimport sys\nimport time\n\ndef main():\n    while(True):\n        loop()\n        #time.sleep(0.25) ## can reduce bottom-line flicker by including a slight delay -- coder discretion\n        \ndef loop():\n    sz = os.popen('stty size', 'r').read()\n    cols = sz.split()[1]\n    rows = sz.split()[0]\n    myText = \"\"\n    myText += \"Terminal window is {0} cols wide and {1} rows tall!\\n\".format(cols, rows)\n    for x in range(int(rows) - 3):\n        myText += \"\\n\"\n    print(myText) ## trailing comma to prevent addition of '\\n'?\n    \n    ## stdout doesn't append '\\n' to the end of statements, but also is another import -- coder discretion\n    #sys.stdout.write(myText) \n    \nmain()","sub_path":"Python/termAware.py","file_name":"termAware.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"84623813","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/12/28 0028 22:45\n# @Author : StephenZ\n# @Site : \n# @File : Case26.py\n# @Purpose :\n# @Software : PyCharm\n# @Copyright: (c) StephenZ 2019\n# @Licence : <@2019>\nfrom typing import List\n\n\nclass Solution:\n    def removeDuplicates(self, nums: List[int]) -> int:\n        # in-place two-pointer dedup for a sorted list: write each first\n        # occurrence forward, then truncate the tail\n        write = 0\n        for read in range(len(nums)):\n            if write == 0 or nums[read] != nums[write - 1]:\n                nums[write] = nums[read]\n                write += 1\n        del nums[write:]\n        return write\n\n\ndef test_solution():\n    s = Solution()\n    a1 = [1, 1, 2]\n    a2 = [0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4]\n    a3 = [1, 1, 1, 1, 1, 1, 1]\n    z = (s.removeDuplicates(a2))\n    print(z)\n\n\ntest_solution()\n","sub_path":"Case/Case26.py","file_name":"Case26.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"13206175","text":"\"\"\"\n\nExercise 10-6: Addition. A common problem when prompting for numeric input is that the user supplies text instead of a number. In that case, trying to convert the input to an integer raises a ValueError. Write a program that prompts the user for two numbers, adds them and prints the result. Catch the ValueError when either input is not a number, and print a friendly error message. Test your program: first enter two numbers, then enter some text instead of a number.\n\n\"\"\"\n\ntry:\n    a = int(input(\"Please enter number a\"))\n    b = int(input(\"Please enter number b\"))\nexcept ValueError:\n    print(\"Please enter integers\")\n\nelse:\n    print(a+b)","sub_path":"exercises/五/#10.6.py","file_name":"#10.6.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"480793262","text":"'''\n@author David W.H. 
Swenson\n'''\nimport time\n\nfrom nose.tools import (assert_equal, assert_items_equal)\nfrom nose.plugins.skip import SkipTest\n\nfrom test_helpers import (true_func, data_filename,\n assert_equal_array_array,\n assert_not_equal_array_array)\nfrom openpathsampling.openmm_engine import *\nfrom openpathsampling.snapshot import Snapshot\nfrom openpathsampling.snapshot import Momentum, Configuration\n\nimport simtk.openmm as mm\nfrom simtk.openmm import app\nfrom simtk import unit\n\n\nclass testOpenMMEngine(object):\n def setUp(self):\n template = paths.tools.snapshot_from_pdb(data_filename(\"ala_small_traj.pdb\"))\n topology = paths.tools.to_openmm_topology(template)\n\n # Generated using OpenMM Script Builder\n # http://builder.openmm.org\n\n forcefield = app.ForceField(\n 'amber96.xml', # solute FF\n 'tip3p.xml' # solvent FF\n )\n\n # OpenMM System\n system = forcefield.createSystem(\n topology,\n nonbondedMethod=app.PME,\n nonbondedCutoff=1.0*unit.nanometers,\n constraints=app.HBonds,\n rigidWater=True,\n ewaldErrorTolerance=0.0005\n )\n\n # OpenMM Integrator\n integrator = mm.LangevinIntegrator(\n 300*unit.kelvin,\n 1.0/unit.picoseconds,\n 2.0*unit.femtoseconds\n )\n integrator.setConstraintTolerance(0.00001)\n\n # Engine options\n options = {\n 'nsteps_per_frame': 10,\n 'platform': 'fastest',\n 'solute_indices' : range(22),\n 'n_frames_max' : 5,\n 'timestep': 2.0*unit.femtoseconds\n }\n\n self.engine = paths.OpenMMEngine(\n template,\n system,\n integrator,\n options\n )\n self.engine.initialize()\n\n context = self.engine.simulation.context\n zero_array = np.zeros((self.engine.n_atoms, 3))\n context.setPositions(self.engine.template.coordinates)\n context.setVelocities(u.Quantity(zero_array, u.nanometers / u.picoseconds))\n\n def teardown(self):\n pass\n\n def test_sanity(self):\n pass\n\n def test_snapshot_get(self):\n snap = self.engine.current_snapshot\n state = self.engine.simulation.context.getState(getVelocities=True,\n getPositions=True)\n pos = state.getPositions(asNumpy=True) / u.nanometers\n vel = state.getVelocities(asNumpy=True) / (u.nanometers / u.picoseconds)\n assert_equal_array_array(snap.coordinates / u.nanometers, pos)\n assert_equal_array_array(snap.velocities / (u.nanometers / u.picoseconds),\n vel)\n\n def test_snapshot_set(self):\n pdb_pos = (self.engine.template.coordinates / u.nanometers)\n testvel = []\n testpos = []\n for i in range(len(pdb_pos)):\n testpos.append(list(np.array(pdb_pos[i]) + \n np.array([1.0, 1.0, 1.0]))\n )\n testvel.append([0.1*i, 0.1*i, 0.1*i])\n\n self.engine.current_snapshot = Snapshot(\n coordinates=testpos,\n velocities=testvel\n )\n state = self.engine.simulation.context.getState(getPositions=True,\n getVelocities=True)\n sim_coords = state.getPositions(asNumpy=True) / u.nanometers\n sim_vels = state.getVelocities(asNumpy=True) / (u.nanometers/u.picoseconds)\n\n np.testing.assert_almost_equal(testpos, sim_coords, decimal=5)\n np.testing.assert_almost_equal(testvel, sim_vels, decimal=5)\n\n def test_generate_next_frame(self):\n snap0 = Snapshot(\n configuration=self.engine.current_snapshot.configuration.copy(),\n momentum=self.engine.current_snapshot.momentum.copy()\n )\n new_snap = self.engine.generate_next_frame()\n old_pos = snap0.coordinates / u.nanometers\n new_pos = new_snap.coordinates / u.nanometers\n old_vel = snap0.velocities / (u.nanometers / u.picoseconds)\n new_vel = new_snap.velocities / (u.nanometers / u.picoseconds)\n assert_equal(old_pos.shape, new_pos.shape)\n assert_equal(old_vel.shape, new_vel.shape)\n 
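# one MD step with a Langevin integrator should change both positions\n        # and velocities, hence the strict inequality assertions below\n        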
assert_not_equal_array_array(old_pos, new_pos)\n assert_not_equal_array_array(old_vel, new_vel)\n\n def test_generate(self):\n traj = self.engine.generate(self.engine.current_snapshot, [true_func])\n assert_equal(len(traj), self.engine.n_frames_max)\n\n def test_snapshot_timestep(self):\n assert_equal(self.engine.snapshot_timestep, 20 * u.femtoseconds)\n\n def test_momentum_setter(self):\n raise SkipTest()\n testvel = []\n for i in range(self.engine.n_atoms):\n testvel.append([0.1*i, 0.1*i, 0.1*i])\n self.engine.momentum = Momentum(velocities=testvel,\n kinetic_energy=None)\n np.testing.assert_almost_equal(self.engine.current_snapshot.velocities /\n (u.nanometers / u.picoseconds), testvel, decimal=5)\n\n def test_momentum_getter(self):\n momentum = self.engine.momentum\n state = self.engine.simulation.context.getState(getVelocities=True)\n velocities = state.getVelocities(asNumpy=True)\n assert_equal_array_array(\n momentum.velocities / (u.nanometers / u.picoseconds),\n velocities / (u.nanometers / u.picoseconds)\n )\n\n def test_configuration_setter(self):\n raise SkipTest()\n pdb_pos = (self.engine.template.coordinates / u.nanometers)\n testpos = []\n for i in range(len(pdb_pos)):\n testpos.append(list(np.array(pdb_pos[i]) + \n np.array([1.0, 1.0, 1.0]))\n )\n self.engine.configuration = Configuration(coordinates=testpos)\n np.testing.assert_almost_equal(self.engine.current_snapshot.coordinates /\n u.nanometers, testpos, decimal=5)\n\n def test_configuration_getter(self):\n config = self.engine.configuration\n state = self.engine.simulation.context.getState(getPositions=True)\n positions = state.getPositions(asNumpy=True)\n assert_equal_array_array(\n config.coordinates / u.nanometers,\n positions / u.nanometers\n )\n","sub_path":"openpathsampling/tests/testopenmmengine.py","file_name":"testopenmmengine.py","file_ext":"py","file_size_in_byte":6484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"630408484","text":"# Import standard Python Modules\nimport time\nimport sys\n\n# Import Adafruit_DHT Module\nimport Adafruit_DHT\n\n# Import RPi.GPIO Module\nimport RPi.GPIO as GPIO\n\ndef main():\n\t# Setup GPIO setmode\n\tGPIO.setmode(GPIO.BCM)\n\n\t# Dict with GPIO pin numbers for temperature alert\n\tpinTemperature={\"temperatura baja\":8, \"temperatura normal\":9, \"temperatura alta\":10}\n\n\t# Dict with GPIO pin numbers for humidty alert\n\tpinHumidity={\"humedad baja\":21, \"humedad normal\":22, \"humedad alta\":23}\n\n\t# Set GPIO pin signal OUT and initial value \"shutdown\"\n\tGPIO.setup(list(pinTemperature.values())+list(pinHumidity.values()), GPIO.OUT, initial=GPIO.LOW)\n\t\n\t# Control vars\n\tlastStateTemperature=\"\"\n\tlastStateHumidity=\"\"\n\n\twhile True:\n\t\t# Retreive data from DHT11 sensor\n\t\thumidity, temperature = Adafruit_DHT.read_retry(sensor=Adafruit_DHT.DHT11, pin=4, retries=15, delay_seconds=2)\n\t\t\n\t\t# Check Retreive Data\n\t\tif humidity is not None and temperature is not None:\n\t\t\tprint('Temp={}° Humidity={}%'.format(temperature, humidity))\n\t\t\t\n\t\t\tif(temperature<35):\n\t\t\t\tstateTemperature=\"temperatura baja\"\n\t\t\telif(temperature>40):\n\t\t\t\tstateTemperature=\"temperatura alta\"\n\t\t\telse:\n\t\t\t\tstateTemperature=\"temperatura normal\"\n\n\t\t\tif(humidity<50):\n\t\t\t\tstateHumidity=\"humedad baja\"\n\t\t\telif(humidity>90):\n\t\t\t\tstateHumidity=\"humedad alta\"\n\t\t\telse:\n\t\t\t\tstateHumidity=\"humedad 
normal\"\n\n\t\t\tif(lastStateTemperature!=stateTemperature):\n\t\t\t\tGPIO.output(list(pinTemperature.values()), GPIO.LOW)\n\t\t\t\tGPIO.output(pinTemperature.get(stateTemperature), GPIO.HIGH)\n\t\t\t\tlastStateTemperature=stateTemperature\n\t\t\t\tprint(\"cambio de estado a {}\".format(stateTemperature))\n\t\t\t\n\t\t\tif(lastStateHumidity!=stateHumidity):\n\t\t\t\tGPIO.output(list(pinHumidity.values()), GPIO.LOW)\n\t\t\t\tGPIO.output(pinHumidity.get(stateHumidity), GPIO.HIGH)\n\t\t\t\tlastStateHumidity=stateHumidity\n\t\t\t\tprint(\"cambio de estado a {}\".format(stateHumidity))\n\t\t\t\n\t\telse:\n\t\t\tprint('Failed to get reading. Try again!')\n\nif __name__==\"__main__\":\n\ttry:\n\t\tmain()\n\texcept:\n\t\tprint(\"{} line {}\".format(sys.exc_info()[0], sys.exc_info()[-1].tb_lineno))\n\t\tGPIO.cleanup()","sub_path":"Project 1 - Smart Garden/ambient temp and humidity.py","file_name":"ambient temp and humidity.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"641410242","text":"# coding:utf-8\n'''\nCreated on 2017年5月8日\n\n@author: hepeng\n'''\n\nfrom django.conf.urls import url, include\nfrom rest_framework.routers import DefaultRouter\nfrom connector.views import ChatMessageListView, URobotView, \\\n ChatRoomView, IntoChatRoomMessageCreateView, IntoChatRoomCreateView, \\\n DropOutChatRoomCreateView, MemberInfoCreateView, GetUrobotQucode, UnotityCallback, CreateRoomTaskView, \\\n ChatRoomKickingView, CreateRoomCallbackView, ModifyRoomNameView, OpenKickingView, ShowKickingView, RebotRoomView, \\\n Qrcode, RobotBlockedView, WhiteMemberCallBackView, SendMessageFailView, UpdateRoomMembers, RoomOver, \\\n CheckChatRoomStatus, ChatRoomInfoModify, ChatRoomAdminChange, TransferMasterCallBackView, TransferMasterApiView\nfrom django.views.decorators.csrf import csrf_exempt\nfrom connector import views\n\nrouter = DefaultRouter()\n#群内实时消息回调\nrouter.register(r'chatmessages', ChatMessageListView, base_name='chatmessages')\nrouter.register(r'urobot', URobotView, base_name='urobot')\nrouter.register(r'chatrooms', ChatRoomView, base_name='chatrooms')\n\nurlpatterns = [\n url(r'', include(router.urls)),\n # 机器人入群回调\n url(r'intochatroommessage/$', IntoChatRoomMessageCreateView.as_view(), name='intochatroommessage'),\n # 成员入群\n url(r'intochatroom/$',IntoChatRoomCreateView.as_view(), name='intochatroom'),\n # 成员退群\n url(r'dropoutchatroom/$', DropOutChatRoomCreateView.as_view(), name='dropoutchatroom'),\n # 获取群成员回调\n url(r'memberinfo/$', csrf_exempt(MemberInfoCreateView.as_view()), name='memberinfo'),\n]\n\nurlpatterns += [\n url(r'^qrcode/$', csrf_exempt(GetUrobotQucode.as_view()), name='get_urobot_qrcode'),\n # 扫码入群回调\n url(r'^unotity/callback/$', csrf_exempt(UnotityCallback.as_view()), name='unotity_callback'),\n # 建群任务\n url(r'^createroomtask/$', csrf_exempt(CreateRoomTaskView.as_view()), name=\"createroomtask\"),\n url(r'^kickingchatroom/$', csrf_exempt(ChatRoomKickingView.as_view()), name='kickingmember'),\n # 建群成功回调\n url(r'^createroomdone/$', csrf_exempt(CreateRoomCallbackView.as_view()), name='createroomdone'),\n # 修改群名(java回调)\n url(r'^modifyroomname/$', csrf_exempt(ModifyRoomNameView.as_view()), name='modifyroomname'),\n # 私拉踢人 - 护群\n url(r'^kickingtask/$', csrf_exempt(OpenKickingView.as_view()), name='kickingtask'),\n url(r'^showtask/$', csrf_exempt(ShowKickingView.as_view()), name='showtask'),\n # 机器人是否在群中回调\n url(r'^rebotroom/$', csrf_exempt(RebotRoomView.as_view()), name='rebotroom'),\n 
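# generate a QR-code image (purpose inferred from the Qrcode view and route name)\n    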
url(r'^make/qrcode/$', csrf_exempt(Qrcode.as_view()), name='make_qrcode'),\n    # robot-banned callback\n    url(r'^robotblocked/$', csrf_exempt(RobotBlockedView.as_view()), name='robotblocked'),\n    # set whitelist members\n    url(r'^whitemember/$', csrf_exempt(WhiteMemberCallBackView.as_view()), name='whitemember'),\n    # message-send-failure callback\n    url(r'^sendmsgfail/$', csrf_exempt(SendMessageFailView.as_view()), name='sendmsgfail'),\n\n    # for Java - fetch group members\n    url(r'^updateroommembers/$', csrf_exempt(UpdateRoomMembers.as_view()), name='updateroommembers'),\n    # for Java - deactivate a group\n    url(r'^roomover/$', csrf_exempt(RoomOver.as_view()), name='roomover'),\n    # for Java - query group status\n    url(r'^checkchatroomstatus/$', csrf_exempt(CheckChatRoomStatus.as_view()), name='checkchatroomstatus'),\n    # for Java - rename a group\n    url(r'^chatroominfomodify/$', csrf_exempt(ChatRoomInfoModify.as_view()), name='chatroominfomodify'),\n    # for Java - transfer group ownership\n    url(r'^chatroomadminchange/$', csrf_exempt(ChatRoomAdminChange.as_view()), name='chatroomadminchange'),\n    # for Java - transfer group ownership (group-creator side)\n    url(r'^transferadminuser/$', csrf_exempt(TransferMasterApiView.as_view()), name='transferadminuser'),\n    # callback URL for the group-creator ownership transfer\n    url(r'^transfermaster/callback/$', csrf_exempt(TransferMasterCallBackView.as_view()), name='transfermaster'),\n]\n","sub_path":"connector/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"349834987","text":"import re\n\nif __name__ == '__main__':\n    lines = []\n    num_lines = int(input())\n    for nl in range(num_lines):\n        lines.append(input())\n    \n    queries = []\n    num_queries = int(input())\n    for nq in range(num_queries):\n        queries.append(input())\n    \n    for q in queries:\n        prog = re.compile(\"\\\\b(\" + q + \"|\" + q.replace(\"our\", \"or\") + \")\\\\b\")\n\n        cnt = 0\n        for l in lines:\n            cnt += len(prog.findall(l))\n        print(cnt)","sub_path":"7.Applications/uk_and_us_part2.py","file_name":"uk_and_us_part2.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"44874986","text":"#!/usr/bin/env python\nimport os\nimport thread\nfrom time import sleep\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(2, GPIO.OUT)\nGPIO.setup(3,GPIO.IN)\ncheck = False\ndef do_thread():\n    global check\n    while(True):\n\n        if(GPIO.input(3) == False):\n            check = True\n        else:\n            check = False\n        sleep(0.01)\nthread.start_new_thread(do_thread,())\nwhile(True):\n    if(check == False):\n        GPIO.output(2,GPIO.HIGH)\n        sleep(0.25)\n        GPIO.output(2,GPIO.LOW)\n        sleep(0.25)\n    else :\n        GPIO.output(2,GPIO.HIGH)\n\n","sub_path":"learn_python/test_gpio_python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"397708221","text":"# -*- coding: utf-8 -*-\n# Copyright 2014, 2015 Metaswitch Networks\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nfelix.devices\n~~~~~~~~~~~~\n\nUtility 
functions for managing devices in Felix.\n\"\"\"\nimport logging\nfrom calico.felix.actor import Actor, actor_message\nimport os\nimport socket\nimport struct\n\nfrom calico import common\nfrom calico.felix import futils\n\n# Logger\nfrom calico.felix.futils import FailedSystemCall\n\n_log = logging.getLogger(__name__)\n\n\ndef configure_global_kernel_config():\n \"\"\"\n Configures the global kernel config. In particular, sets the flags\n that we rely on to ensure security, such as the kernel's RPF check.\n\n :raises BadKernelConfig if a problem is detected.\n \"\"\"\n\n # For IPv4, we rely on the kernel's reverse path filtering to prevent\n # workloads from spoofing their IP addresses.\n #\n # The RPF check for a particular interface is controlled by several\n # sysctls:\n #\n # - ipv4.conf.all.rp_filter is a global override\n # - ipv4.conf.default.rp_filter controls the value that is set on a newly\n # created interface\n # - ipv4.conf..rp_filter controls a particular interface.\n #\n # The algorithm for combining the global override and per-interface values\n # is to take the *numeric* maximum between the two. The values are:\n # 0=off, 1=strict, 2=loose. \"loose\" is not suitable for Calico since it\n # would allow workloads to spoof packets from other workloads on the same\n # host. Hence, we need the global override to be <=1 or it would override\n # the per-interface setting to \"strict\" that we require.\n #\n # We bail out rather than simply setting it because setting 2, \"loose\",\n # is unusual and it is likely to have been set deliberately.\n ps_name = \"/proc/sys/net/ipv4/conf/all/rp_filter\"\n rp_filter = int(_read_proc_sys(ps_name))\n if rp_filter > 1:\n _log.critical(\"Kernel's RPF check is set to 'loose'. This would \"\n \"allow endpoints to spoof their IP address. 
Calico \"\n \"requires net.ipv4.conf.all.rp_filter to be set to \"\n \"0 or 1.\")\n raise BadKernelConfig(\"net.ipv4.conf.all.rp_filter set to 'loose'\")\n\n # Make sure the default for new interfaces is set to strict checking so\n # that there's no race when a new interface is added and felix hasn't\n # configured it yet.\n _write_proc_sys(\"/proc/sys/net/ipv4/conf/default/rp_filter\", \"1\")\n\n # We use sysfs for inspecting devices.\n if not os.path.exists(\"/sys/class/net\"):\n raise BadKernelConfig(\"Felix requires sysfs to be mounted at /sys\")\n\n\ndef interface_exists(interface):\n \"\"\"\n Checks if an interface exists.\n :param str interface: Interface name\n :returns: True if interface device exists\n\n Note: this checks that the interface exists at a particular point in time\n but the caller needs to be defensive to the interface disappearing before\n it has a chance to access it.\n \"\"\"\n return os.path.exists(\"/sys/class/net/%s\" % interface)\n\n\ndef list_interface_ips(ip_type, interface):\n \"\"\"\n List IP addresses for which there are routes to a given interface.\n :param str ip_type: IP type, either futils.IPV4 or futils.IPV6\n :param str interface: Interface name\n :returns: a set of all addresses for which there is a route to the device.\n \"\"\"\n ips = set()\n\n if ip_type == futils.IPV4:\n data = futils.check_call(\n [\"ip\", \"route\", \"list\", \"dev\", interface]).stdout\n else:\n data = futils.check_call(\n [\"ip\", \"-6\", \"route\", \"list\", \"dev\", interface]).stdout\n\n lines = data.split(\"\\n\")\n\n _log.debug(\"Existing routes to %s : %s\", interface, lines)\n\n for line in lines:\n # Example of the lines we care about is (having specified the\n # device above): \"10.11.2.66 proto static scope link\"\n words = line.split()\n\n if len(words) > 1:\n ip = words[0]\n if common.validate_ip_addr(ip, futils.IP_TYPE_TO_VERSION[ip_type]):\n # Looks like an IP address. 
Note that we here are ignoring\n # routes to networks configured when the interface is created.\n ips.add(words[0])\n\n _log.debug(\"Found existing IP addresses : %s\", ips)\n\n return ips\n\n\ndef configure_interface_ipv4(if_name):\n \"\"\"\n Configure the various proc file system parameters for the interface for\n IPv4.\n\n Specifically,\n - Allow packets from controlled interfaces to be directed to localhost\n - Enable proxy ARP\n - Enable the kernel's RPF check.\n\n :param if_name: The name of the interface to configure.\n :returns: None\n \"\"\"\n # Enable the kernel's RPF check, which ensures that a VM cannot spoof\n # its IP address.\n _write_proc_sys('/proc/sys/net/ipv4/conf/%s/rp_filter' % if_name, 1)\n _write_proc_sys('/proc/sys/net/ipv4/conf/%s/route_localnet' % if_name, 1)\n _write_proc_sys(\"/proc/sys/net/ipv4/conf/%s/proxy_arp\" % if_name, 1)\n _write_proc_sys(\"/proc/sys/net/ipv4/neigh/%s/proxy_delay\" % if_name, 0)\n\n\ndef configure_interface_ipv6(if_name, proxy_target):\n \"\"\"\n Configure an interface to support IPv6 traffic from an endpoint.\n - Enable proxy NDP on the interface.\n - Program the given proxy target (gateway the endpoint will use).\n\n :param if_name: The name of the interface to configure.\n :param proxy_target: IPv6 address which is proxied on this interface for\n NDP.\n :returns: None\n :raises: FailedSystemCall\n \"\"\"\n _write_proc_sys(\"/proc/sys/net/ipv6/conf/%s/proxy_ndp\" % if_name, 1)\n\n # Allows None if no IPv6 proxy target is required.\n if proxy_target:\n futils.check_call([\"ip\", \"-6\", \"neigh\", \"add\",\n \"proxy\", str(proxy_target), \"dev\", if_name])\n\n\ndef _read_proc_sys(name):\n with open(name, \"rb\") as f:\n return f.read().strip()\n\n\ndef _write_proc_sys(name, value):\n with open(name, \"wb\") as f:\n f.write(str(value))\n\n\ndef add_route(ip_type, ip, interface, mac):\n \"\"\"\n Add a route to a given interface (including arp config).\n Errors lead to exceptions that are not handled here.\n\n Note that we use \"ip route replace\", since that overrides any imported\n routes to the same IP, which might exist in the middle of a migration.\n\n :param ip_type: Type of IP (IPV4 or IPV6)\n :param str ip: IP address\n :param str interface: Interface name\n :param str mac: MAC address. May not be None unless ip is None.\n :raises FailedSystemCall\n \"\"\"\n if mac is None and ip:\n raise ValueError(\"mac must be supplied if ip is provided\")\n\n if ip_type == futils.IPV4:\n futils.check_call(['arp', '-s', ip, mac, '-i', interface])\n futils.check_call([\"ip\", \"route\", \"replace\", ip, \"dev\", interface])\n else:\n futils.check_call([\"ip\", \"-6\", \"route\", \"replace\", ip, \"dev\",\n interface])\n\n\ndef del_route(ip_type, ip, interface):\n \"\"\"\n Delete a route to a given interface (including arp config).\n\n :param ip_type: Type of IP (IPV4 or IPV6)\n :param str ip: IP address\n :param str interface: Interface name\n :raises FailedSystemCall\n \"\"\"\n if ip_type == futils.IPV4:\n futils.check_call(['arp', '-d', ip, '-i', interface])\n futils.check_call([\"ip\", \"route\", \"del\", ip, \"dev\", interface])\n else:\n futils.check_call([\"ip\", \"-6\", \"route\", \"del\", ip, \"dev\", interface])\n\n\ndef set_routes(ip_type, ips, interface, mac=None, reset_arp=False):\n \"\"\"\n Set the routes on the interface to be the specified set.\n\n :param ip_type: Type of IP (IPV4 or IPV6)\n :param set ips: IPs to set up (any not in the set are removed)\n :param str interface: Interface name\n :param str mac|NoneType: MAC address. 
May not be none unless ips is empty.\n :param bool reset_arp: Reset arp. Only valid if IPv4.\n \"\"\"\n if mac is None and ips:\n raise ValueError(\"mac must be supplied if ips is not empty\")\n if reset_arp and ip_type != futils.IPV4:\n raise ValueError(\"reset_arp may only be supplied for IPv4\")\n\n current_ips = list_interface_ips(ip_type, interface)\n\n removed_ips = (current_ips - ips)\n for ip in removed_ips:\n del_route(ip_type, ip, interface)\n remove_conntrack_flows(removed_ips, 4 if ip_type == futils.IPV4 else 6)\n for ip in (ips - current_ips):\n add_route(ip_type, ip, interface, mac)\n if reset_arp:\n for ip in (ips & current_ips):\n futils.check_call(['arp', '-s', ip, mac, '-i', interface])\n\n\ndef interface_up(if_name):\n \"\"\"\n Checks whether a given interface is up.\n\n Check this by examining the operstate of the interface, which is the\n highest level \"is it ready to work with\" flag.\n\n :param str if_name: Interface name\n :returns: True if interface up, False if down or cannot detect\n \"\"\"\n operstate_filename = '/sys/class/net/%s/operstate' % if_name\n try:\n with open(operstate_filename, 'r') as f:\n oper_state = f.read().strip()\n except IOError as e:\n # If we fail to check that the interface is up, then it has probably\n # gone under our feet or is flapping.\n _log.warning(\"Failed to read state of interface %s (%s) - assume \"\n \"down/absent: %r.\", if_name, operstate_filename, e)\n return False\n else:\n _log.debug(\"Interface %s has state %s\", if_name, oper_state)\n return oper_state == \"up\"\n\n\ndef remove_conntrack_flows(ip_addresses, ip_version):\n \"\"\"\n Removes any conntrack entries that use any of the given IP\n addresses in their source/destination.\n \"\"\"\n assert ip_version in (4, 6)\n for ip in ip_addresses:\n _log.debug(\"Removing conntrack rules for %s\", ip)\n for direction in [\"--orig-src\", \"--orig-dst\",\n \"--reply-src\", \"--reply-dst\"]:\n try:\n futils.check_call([\"conntrack\", \"--family\",\n \"ipv%s\" % ip_version, \"--delete\",\n direction, ip])\n except FailedSystemCall as e:\n if e.retcode == 1 and \"0 flow entries\" in e.stderr:\n # Expected if there are no flows.\n _log.debug(\"No conntrack entries found for %s/%s.\",\n ip, direction)\n else:\n # Suppress the exception, conntrack entries will timeout\n # and it's hard to think of an example where killing and\n # restarting felix would help.\n _log.exception(\"Failed to remove conntrack flows for %s. \"\n \"Ignoring.\", ip)\n\n\n# These constants map to constants in the Linux kernel. This is a bit poor, but\n# the kernel can never change them, so live with it for now.\nRTMGRP_LINK = 1\n\nNLMSG_NOOP = 1\nNLMSG_ERROR = 2\n\nRTM_NEWLINK = 16\nRTM_DELLINK = 17\n\nIFLA_IFNAME = 3\nIFLA_OPERSTATE = 16\nIF_OPER_UP = 6\n\n\nclass RTNetlinkError(Exception):\n \"\"\"\n How we report an error message.\n \"\"\"\n pass\n\n\nclass InterfaceWatcher(Actor):\n def __init__(self, update_splitter):\n super(InterfaceWatcher, self).__init__()\n self.update_splitter = update_splitter\n self.interfaces = {}\n\n @actor_message()\n def watch_interfaces(self):\n \"\"\"\n Detects when interfaces appear, sending notifications to the update\n splitter.\n\n :returns: Never returns.\n \"\"\"\n # Create the netlink socket and bind to RTMGRP_LINK,\n s = socket.socket(socket.AF_NETLINK,\n socket.SOCK_RAW,\n socket.NETLINK_ROUTE)\n s.bind((os.getpid(), RTMGRP_LINK))\n\n # A dict that remembers the detailed flags of an interface\n # when we last signalled it as being up. 
We use this to avoid\n # sending duplicate interface_update signals.\n if_last_flags = {}\n\n while True:\n # Get the next set of data.\n data = s.recv(65535)\n\n # First 16 bytes is the message header; unpack it.\n hdr = data[:16]\n data = data[16:]\n msg_len, msg_type, flags, seq, pid = struct.unpack(\"=LHHLL\", hdr)\n\n if msg_type == NLMSG_NOOP:\n # Noop - get some more data.\n continue\n elif msg_type == NLMSG_ERROR:\n # We have got an error. Raise an exception which brings the\n # process down.\n raise RTNetlinkError(\"Netlink error message, header : %s\",\n futils.hex(hdr))\n _log.debug(\"Netlink message type %s len %s\", msg_type, msg_len)\n\n if msg_type in [RTM_NEWLINK, RTM_DELLINK]:\n # A new or removed interface. Read the struct\n # ifinfomsg, which is 16 bytes.\n hdr = data[:16]\n data = data[16:]\n _, _, _, index, flags, _ = struct.unpack(\"=BBHiII\", hdr)\n _log.debug(\"Interface index %s flags %x\", index, flags)\n\n # Bytes left is the message length minus the two headers of 16\n # bytes each.\n remaining = msg_len - 32\n\n # Loop through attributes, looking for the pieces of\n # information that we need.\n ifname = None\n operstate = None\n while remaining:\n # The data content is an array of RTA objects, each of\n # which has a 4 byte header and some data.\n rta_len, rta_type = struct.unpack(\"=HH\", data[:4])\n\n # This check comes from RTA_OK, and terminates a string of\n # routing attributes.\n if rta_len < 4:\n break\n\n rta_data = data[4:rta_len]\n\n # Remove the RTA object from the data. The length to jump\n # is the rta_len rounded up to the nearest 4 byte boundary.\n increment = int((rta_len + 3) / 4) * 4\n data = data[increment:]\n remaining -= increment\n\n if rta_type == IFLA_IFNAME:\n ifname = rta_data[:-1]\n _log.debug(\"IFLA_IFNAME: %s\", ifname)\n elif rta_type == IFLA_OPERSTATE:\n operstate, = struct.unpack(\"=B\", rta_data[:1])\n _log.debug(\"IFLA_OPERSTATE: %s\", operstate)\n\n if (ifname and\n (msg_type == RTM_DELLINK or operstate != IF_OPER_UP)):\n # The interface is down; make sure the other actors know\n # about it.\n self.update_splitter.on_interface_update(ifname,\n iface_up=False)\n # Remove any record we had of the interface so that, when\n # it goes back up, we'll report that.\n if_last_flags.pop(ifname, None)\n\n if (ifname and\n msg_type == RTM_NEWLINK and\n operstate == IF_OPER_UP and\n (ifname not in if_last_flags or\n if_last_flags[ifname] != flags)):\n # We only care about notifying when a new\n # interface is usable, which - according to\n # https://www.kernel.org/doc/Documentation/networking/\n # operstates.txt - is fully conveyed by the\n # operstate. 
(When an interface goes away, it\n # automatically takes its routes with it.)\n _log.debug(\"New network interface : %s %x\", ifname, flags)\n if_last_flags[ifname] = flags\n self.update_splitter.on_interface_update(ifname,\n iface_up=True)\n\n\nclass BadKernelConfig(Exception):\n pass\n","sub_path":"calico/felix/devices.py","file_name":"devices.py","file_ext":"py","file_size_in_byte":16552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"338686231","text":"# Given a string find longest palindromic subsequence in this string.\n\n# Example 1:\n# Input:\n# S = 'abcdca'\n# Output: 5\n# Explanation: acdca\n# ========================================================================================\n# Algorithm:\n# Similar to substring except use the table to keep track of longest length instead of boolean flag.\n# If the characters on the end are the same, add the longest palindrome plus 2.\n# Or get the max from [i][j-1] or [i+1][j]\n# TC: O(n^2). Two nested traversals are needed.\n# SC: O(n^2). Matrix of size n*n is needed to store the dp array.\n# ========================================================================================\n\n\n# Method-1: Dynamic programming\n# This function prints longest palindromic subsequence\n# and returns length of the longest palindromic subsequence\ndef longest_palindromic_subsequence(s):\n\n n = len(s)\n idx = []\n seq = \"\"\n\n dp = [[0 for j in range(n)] for i in range(n)]\n\n # All subsequence of length 1 are palindromes\n i = 0\n while i < n:\n dp[i][i] = 1\n i += 1\n\n # Find palindromic subsequence of length 2\n start = 0\n i = 0\n while i < n - 1:\n if s[i] == s[i+1]:\n dp[i][i+1] = 2\n else:\n dp[i][i+1] = 1\n i += 1\n\n # Find palindromic subsequence of length >= 3\n # l is length of substring\n subseq_len = 3\n while subseq_len <= n:\n # Starting index i\n i = 0\n while i < n - subseq_len + 1:\n\n # calculate the ending index of subsequence\n j = i + subseq_len - 1\n\n # Check for subsequence from ith index\n # to jth index\n if s[i] == s[j]:\n dp[i][j] = 2 + dp[i+1][j-1]\n else:\n dp[i][j] = max(dp[i+1][j], dp[i][j-1])\n\n i += 1\n subseq_len += 1\n\n return dp[0][n-1]\n\n\n# Method-2: Recursive\ndef calculate_recursive(s, start, subseq_len):\n\n if subseq_len == 1:\n return 1\n\n if subseq_len == 0:\n return 0\n\n if s[start] == s[start+subseq_len-1]:\n return 2 + calculate_recursive(s, start+1, subseq_len-2)\n else:\n return max(calculate_recursive(s, start+1, subseq_len-1), calculate_recursive(s, start, subseq_len-1))\n\n\nif __name__ == \"__main__\":\n\n input_str = 'abcdca' # output: 5, acdca\n # print(calculate_recursive(input_str, 0, len(input_str)))\n print(longest_palindromic_subsequence(input_str))\n\n","sub_path":"code/set_19_string/longest_palindromic_subsequence.py","file_name":"longest_palindromic_subsequence.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"26495252","text":"#######################################################################\n# Unit tests for qc/cellranger.py\n#######################################################################\n\nimport unittest\nimport os\nimport shutil\nimport tempfile\nfrom auto_process_ngs.mock import MockAnalysisProject\nfrom auto_process_ngs.mock import UpdateAnalysisProject\nfrom auto_process_ngs.analysis import AnalysisProject\n\nfrom auto_process_ngs.qc.cellranger import CellrangerCount\n\nclass 
TestCellrangerCount(unittest.TestCase):\n def setUp(self):\n # Create a temp working dir\n self.dirn = tempfile.mkdtemp(suffix='TestCellrangerCount')\n # Make mock analysis project\n p = MockAnalysisProject(\"PJB\",(\"PJB1_S1_R1_001.fastq.gz\",\n \"PJB1_S1_R2_001.fastq.gz\",\n \"PJB2_S2_R1_001.fastq.gz\",\n \"PJB2_S2_R2_001.fastq.gz\",),\n metadata={ 'Organism': 'Human',\n 'Single cell platform':\n \"10xGenomics Chromium 3'v3\" })\n p.create(top_dir=self.dirn)\n self.project = AnalysisProject(\"PJB\",os.path.join(self.dirn,\"PJB\"))\n def tearDown(self):\n # Remove the temporary test directory\n shutil.rmtree(self.dirn)\n def test_cellrangercount(self):\n \"\"\"\n CellrangerCount: check outputs from cellranger count\n \"\"\"\n # Add cellranger count outputs\n UpdateAnalysisProject(self.project).add_cellranger_count_outputs()\n # Do tests\n count_dir = os.path.join(self.project.qc_dir,\"cellranger_count\",\"PJB1\")\n cellranger_count = CellrangerCount(count_dir)\n self.assertEqual(cellranger_count.dir,count_dir)\n self.assertEqual(cellranger_count.sample_name,\"PJB1\")\n self.assertEqual(cellranger_count.metrics_csv,\n os.path.join(count_dir,\"outs\",\"metrics_summary.csv\"))\n self.assertEqual(cellranger_count.web_summary,\n os.path.join(count_dir,\"outs\",\"web_summary.html\"))\n def test_cellrangercount_missing_directory(self):\n \"\"\"\n CellrangerCount: handle missing directory\n \"\"\"\n # Do tests\n count_dir = os.path.join(self.project.qc_dir,\"cellranger_count\",\"PJB1\")\n cellranger_count = CellrangerCount(count_dir)\n self.assertRaises(OSError,\n getattr,cellranger_count,'dir')\n self.assertEqual(cellranger_count.sample_name,None)\n self.assertRaises(OSError,\n getattr,cellranger_count,'metrics_csv')\n self.assertRaises(OSError,\n getattr,cellranger_count,'web_summary')\n","sub_path":"auto_process_ngs/test/qc/test_cellranger.py","file_name":"test_cellranger.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"549147853","text":"from django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\n\nfrom rest_framework.schemas import get_schema_view\n\nfrom website.views import Index, ReactView\nfrom .views import CustomRegistrationView, CustomLogoutView, CustomModelRegistrationView\nfrom .urls_api import router\n\nschema_view = get_schema_view(title='Pastebin API')\n\nurlpatterns = [\n url(r'^schema/$', schema_view),\n url(r'^api/', include(router.urls)),\n url(r'^admin/', include('admin_honeypot.urls', namespace='admin_honeypot')),\n url(r'^admin_company/', admin.site.urls),\n url(r'^accounts/register/$', CustomRegistrationView.as_view(), name='registration_register'),\n url(r'^accounts/model_register/$', CustomModelRegistrationView.as_view(), name='registration_model_register'),\n url(r'^accounts/logout/$', CustomLogoutView.as_view()),\n url(r'^accounts/', include('registration.backends.default.urls')),\n url(r'^react/', ReactView.as_view(), name='react'),\n url(r'^terceros/', include('dramorapp_terceros.urls', namespace='terceros')),\n url(r'^$', Index.as_view(), name='index'),\n url(r'^dramor/', include('website.urls', namespace='website')),\n url(r'^dramor_perfiles/', include('website_perfiles.urls', namespace='perfiles_modelos')),\n url(r'^admin_dashboards/', include('admin_dashboards.urls', namespace='admin_dashboard')),\n url(r'^admin_modelos/', include('admin_modelos.urls', 
namespace='admin_modelos')),\n url(r'^admin_redis/', include('admin_redis.urls', namespace='admin_redis')),\n url(r'^admin_servicios/', include('dramorapp_servicios.urls', namespace='admin_servicios')),\n url(r'^geografia/', include('geografia.urls', namespace='geografia')),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^tinymce/', include('tinymce.urls')),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"DrAmor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"115574971","text":"# Author: Florian Wagner \n# Copyright (c) 2018, New York University\n#\n# This file is part of Moana.\n\n\"\"\"Smoothing model for Moana scRNA-Seq classifiers.\"\"\"\n\nimport logging\nfrom typing import Union, Iterable\nfrom collections import OrderedDict\nimport time\n\nfrom sklearn.metrics.pairwise import pairwise_distances\nimport numpy as np\n\nfrom .. import preprocess as pp\nfrom ..core import ExpMatrix\nfrom ..util import get_sel_components\nfrom .util import apply_smoothing\nfrom . import PCAModel\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass SmoothingModel:\n \"\"\"A smoothing model for single-cell RNA-Seq data.\"\"\"\n\n def __init__(self, k: int = 1, d: int = 20,\n dither: Union[float, int] = 0,\n components: Union[Iterable[int], int] = 2,\n min_transcript_frac: Union[float, int] = 0.9,\n smooth_min_transcripts: Union[int, float] = 500,\n seed: int = 0) -> None:\n\n self.k = k\n self.d = d\n self.dither = dither\n self.sel_components = get_sel_components(components)\n self.seed = seed\n self.min_transcript_frac = min_transcript_frac\n self.smooth_min_transcripts = smooth_min_transcripts\n\n self.transcript_count_ = None\n self.smooth_pca_models_ = None\n\n\n @property\n def genes_(self):\n return next(iter(self.smooth_pca_models_.values())).genes_\n\n\n def _require_trained_model(self) -> None:\n if self.smooth_pca_models_ is None:\n raise RuntimeError('You must train the model first using \"fit()\"!')\n\n\n def _select_pca_model(self, transcript_count: float,\n min_transcript_frac: Union[float, int] = None):\n\n if min_transcript_frac is None:\n min_transcript_frac = self.min_transcript_frac\n\n #try:\n # selected_count, selected_model = next(iter(smooth_pca_models.items()))\n #except StopIteration:\n # raise ValueError('No smoothing PCA models available!')\n\n iter_models = iter(self.smooth_pca_models_.items())\n\n selected_count, selected_model = next(iter_models)\n\n for count, pca_model in iter_models:\n if count * min_transcript_frac <= transcript_count:\n selected_model = pca_model\n selected_count = count\n else:\n break\n\n return selected_model, selected_count\n\n\n def fit(self, matrix: ExpMatrix, is_smoothed: bool = False) -> None:\n \"\"\"Train the smoothing model.\"\"\"\n\n if not is_smoothed:\n # apply kNN-smoothing\n smoothed_matrix = pp.knn_smoothing(\n matrix, self.k, d=self.d, dither=self.dither, seed=self.seed)\n else:\n smoothed_matrix = matrix\n\n # determine median transcript count of smoothed matrix\n transcript_count = float(smoothed_matrix.sum(axis=0).median())\n\n if transcript_count < self.smooth_min_transcripts:\n raise ValueError(\n 'The 
given matrix has fewer transcripts per cell than the '\n 'value of the \"smooth_min_transcripts\" parameter. Cannot '\n 'construct a smoothing model under these conditions.')\n\n # construct PCA models for down-scaled data\n smooth_pca_models = []\n new_transcript_count = transcript_count\n scaled_matrix = smoothed_matrix\n while new_transcript_count >= self.smooth_min_transcripts:\n scaled_matrix = scaled_matrix / 2.0 # creates a copy, unlike \"/=\"!\n new_transcript_count /= 2\n _LOGGER.info('Generating PCA model for %.1f transcripts.',\n new_transcript_count)\n\n pca_model = PCAModel(self.sel_components, self.seed)\n pca_model.fit(scaled_matrix)\n smooth_pca_models.append((new_transcript_count, pca_model))\n\n #_, scaled_pca_model = pp.pca(\n # scaled_matrix, self.d, seed=self.seed)\n #smooth_pca_models.append((new_transcript_count, scaled_pca_model))\n\n self.transcript_count_ = transcript_count\n self.smooth_pca_models_ = OrderedDict(reversed(smooth_pca_models))\n\n\n def transform(self, matrix: ExpMatrix,\n min_transcript_frac: Union[float, int] = None) -> ExpMatrix:\n \"\"\"Apply the smoothing model.\"\"\"\n\n self._require_trained_model()\n\n if min_transcript_frac is None:\n min_transcript_frac = self.min_transcript_frac\n\n t0_total = time.time()\n\n num_transcripts = matrix.sum(axis=0)\n cur_transcript_count = num_transcripts.median()\n transcript_thresh = self.transcript_count_ * min_transcript_frac\n _LOGGER.info('Median transcript count before smoothing: %.1f',\n cur_transcript_count)\n _LOGGER.info('Transcript count threshold to be exceeded using '\n 'smoothing: %.1f', transcript_thresh)\n\n total_transcripts = num_transcripts.sum()\n if total_transcripts < transcript_thresh:\n raise ValueError(\n 'Specified transcript count threshold is unattainable!')\n\n smoothed_matrix = matrix.copy().astype(np.float64, copy=False)\n if cur_transcript_count >= transcript_thresh:\n _LOGGER.info('No smoothing required!')\n return smoothed_matrix, 1\n\n k = 1\n X = np.array(matrix.X, dtype=np.float64, order='F', copy=False)\n while cur_transcript_count < transcript_thresh:\n k = min(k*2, matrix.n)\n _LOGGER.info('Smoothing with k=%d...', k)\n\n pca_model, pca_transcript_count = \\\n self._select_pca_model(cur_transcript_count)\n\n _LOGGER.info('Selected PCA model with transcript_count=%.1f',\n pca_transcript_count)\n\n tmatrix = pca_model.transform(smoothed_matrix)\n\n t0 = time.time()\n D = pairwise_distances(tmatrix.T, n_jobs=1, metric='euclidean')\n t1 = time.time()\n _LOGGER.info('Calculating the pairwise distance matrix took '\n '%.1f s.', t1-t0)\n\n t0 = time.time()\n A = np.argsort(D, axis=1, kind='mergesort')\n S = np.array(smoothed_matrix.values, order='F', copy=False)\n for j in range(matrix.shape[1]):\n ind = A[j, :k]\n S[:, j] = np.sum(X[:, ind], axis=1)\n\n t1 = time.time()\n _LOGGER.info('Calculating the smoothed expression matrix took '\n '%.1f s.', t1-t0)\n\n smoothed_matrix = ExpMatrix(\n genes=matrix.genes, cells=matrix.cells, X=S)\n cur_transcript_count = smoothed_matrix.sum(axis=0).median()\n _LOGGER.info('The new transcript count is: %.1f',\n cur_transcript_count)\n\n t1_total = time.time()\n\n _LOGGER.info('Applied smoothing with k=%d (took %.1f s)',\n k, t1_total - t0_total)\n\n return smoothed_matrix, k\n","sub_path":"moana/classify/smoothing_model.py","file_name":"smoothing_model.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
+{"seq_id":"319216025","text":"#----------------------------------------------------------------------\n# Utility Module\n''' author: Jeff Peery '''\n# date: 08/21/2008\n# email: JeffPeery@yahoo.com\n#----------------------------------------------------------------------\n\n#----------------------------------------------------------------------\n# Revision Log\n#\n# Rev Date Author Description \n#----------------------------------------------------------------------\n\"\"\"\n 1.05 2018/04/02 SPN -Removed veriy meters list\n 1.04 2016/10/12 SPN -Added PROFILE_BUTTON_FONT_SIZE constant\n 1.03 2016/08/24 SPN Added VERIFY_METER_SN_LIST constant\n 1.02 2014/03/31 SPN Added CTRL_COLOR_BUSY, CTRL_COLOR_PASS, CTRL_COLOR_FAIL constants to support Serial Comm feature\n 1.01 2013/02/20 SPN Added SERIAL_NUMBER_LENGTH2 constant to accomodate for new 12 digit serial #'s\n 1 08/21/2008 JTP Initial Release\n\n\"\"\"\n\n#----------------------------------\n# Application\n#----------------------------------\n# syntax: xx.yy.zz\n# xx is major release\n# yy is feature addition\n# zz is bug fix\n\nVERSION = '01.01.00'\n\nSERIAL_NUMBER_LENGTH = 8\nSERIAL_NUMBER_LENGTH2 = 12\n\n#----------------------------------\n# GUI\n#----------------------------------\nENABLE_THEME = False\nDIRECTORY_FRAME_FONT_SIZE = 20\nCTRL_HEIGHT_DIRECTORY_FRAME = 41\nFRAME_FONT_SIZE = 10\nCTRL_PANEL_FONT_SIZE = 20\nPROFILE_BUTTON_FONT_SIZE = 16\nSYSTEM_PANEL_FONT_SIZE = 12\nFRAME_FONT = 'Tahoma'\nBUTTON_HEIGHT = 75\nBUTTON_WIDTH = 175\nCTRL_WIDTH = 100\nCTRL_HEIGHT = 30\nCHECKBOX_WIDTH = 50\nCHECKBOX_HEIGHT = CTRL_HEIGHT\nDATE_PICKER_WIDTH = 100\nDATE_PICKER_HEIGHT = CTRL_HEIGHT\nTEXT_CTRL_WIDTH = 100\nTEXT_CTRL_HEIGHT = CTRL_HEIGHT\nSTATIC_TEXT_WIDTH = 50\nSTATIC_TEXT_HEIGHT = CTRL_HEIGHT\nCTRL_SPACING = 5\nBUTTON_SPACING = 5\nTOGGLE_BUTTON_DOWN = True\nTOGGLE_BUTTON_UP = False\nAPPROX_TEST_TIME = 60.0\nQUICK_METER_TEST_TIME = 20.0\nTIMER_PERIOD_MS = 1000\nKEYPAD_BUTTON_HEIGHT = 50\nKEYPAD_BUTTON_WIDTH = 50\nCTRL_COLOR_BUSY = 'blue'\nCTRL_COLOR_PASS = 'green'\nCTRL_COLOR_FAIL = 'red'\n","sub_path":"MODULES/myHeader.py","file_name":"myHeader.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"349946285","text":"import matplotlib; matplotlib.use('Agg')\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom FaceDetect.Face_recognition import Face_recogn\nimport cv2\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\nMODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n\nPATH_TO_TEST_IMAGES_DIR = 'test_images'\nTEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'test{}.png'.format(i)) for i in range(2, 13)]\n\n# Size, in inches, of the output images.\nIMAGE_SIZE = (12, 8)\nNUM_CLASSES = 90\n\n\ndef benchmark(func):\n \"\"\"\n Декоратор, выводящий время, которое заняло\n выполнение функции.\n \"\"\"\n import time\n\n def wrapper(*args, **kwargs):\n t = time.clock()\n res = func(*args, **kwargs)\n print(func.__name__, time.clock() - t)\n return res\n\n return wrapper\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\ndef main_func():\n cap = cv2.VideoCapture(0)\n face_detect = 
Face_recogn()\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n with detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n while True:\n ret, image_np = cap.read()\n image_np_expanded = np.expand_dims(image_np, axis=0)\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represents the level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n (boxes, scores, classes, num_detections) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n # Visualization of the results of a detection.\n final_score = np.squeeze(scores)\n\n # print(final_score)\n count = 0\n for i in range(100):\n if scores is None or final_score[i] > 0.5:\n count = count + 1\n # print(count)\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=8)\n # image_np = cv2.cvtColor(image_np,cv2.COLOR_BGR2RGB)\n image_np = face_detect.recogn_face(image_np)\n cv2.imshow('object detection', cv2.resize(image_np, (800, 600)))\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\nif __name__ == \"__main__\":\n main_func()","sub_path":"detect_human.py","file_name":"detect_human.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"591007851","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nunittest.TestLoader.sortTestMethodsUsing = None\n\nclass BaseTest(unittest.TestCase):\n def setUp(self):\n options = Options()\n # options.add_argument(\"--headless\") # Runs Chrome in headless mode.\n options.add_argument('--no-sandbox') # Bypass OS security model\n options.add_argument('disable-infobars')\n options.add_argument(\"--disable-extensions\")\n # options.add_argument(\"--start-fullscreen\")\n options.add_argument('--disable-gpu')\n self.driver = webdriver.Chrome('c:\\webdrivers\\chromedriver.exe', options=options)\n # self.driver = webdriver.Firefox()\n self.driver.get(\"https://www.evaly.com.bd\")\n self.driver.maximize_window()\n\n def tearDown(self):\n self.driver.close()\n\n\nif __name__ == \"__main__\":\n unittest.TestLoader.sortTestMethodsUsing = None\n suite = unittest.TestLoader().loadTestsFromTestCase(BaseTest)\n 
unittest.TextTestRunner(verbosity=1).run(suite)\n","sub_path":"tests/base_test.py","file_name":"base_test.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"393801686","text":"# -*- coding: utf-8 -*-\n\nimport pytest\n\n\ndef test_spec_and_value():\n from temboardagent.configuration import OptionSpec, Value\n\n spec = OptionSpec(section='temboard', name='verbose', default=False)\n assert repr(spec)\n\n value = Value('temboard_verbose', True, origin='test')\n assert repr(value)\n\n assert value.name in {spec}\n\n\ndef test_load(mocker):\n mocker.patch('temboardagent.configuration.MergedConfiguration.load_file')\n mocker.patch('temboardagent.configuration.load_plugins_configurations')\n # Bypass file validation\n mocker.patch('temboardagent.configuration.validators.file_', None)\n\n from argparse import Namespace\n from temboardagent.configuration import OptionSpec, load_configuration\n\n specs = [\n # to test argument parsing\n OptionSpec(section='temboard', name='fromarg', default='DEFVAL'),\n # to test environment parsing\n OptionSpec(section='temboard', name='fromenv', default='DEFVAL'),\n # to test default value\n OptionSpec(section='temboard', name='fromdefault', default='DEFVAL'),\n ]\n args = Namespace(temboard_fromarg='ARGVAL')\n environ = dict(\n TEMBOARD_FROMENV='ENVVAL',\n # These should be ignored\n TEMBOARD_FROMARG='ENVAL',\n PATH='',\n )\n config = load_configuration(specs=specs, args=args, environ=environ)\n\n assert 'DEFVAL' == config.temboard.fromdefault\n assert 'ARGVAL' == config.temboard.fromarg\n assert 'ENVVAL' == config.temboard.fromenv\n assert config.temboard.configfile.startswith('/etc/temboard-agent/')\n assert config.loaded is True\n\n\ndef test_load_invalid_from_user(mocker):\n file_ = mocker.patch('temboardagent.configuration.validators.file_')\n file_.side_effect = ValueError()\n\n from temboardagent.configuration import (\n load_configuration,\n UserError,\n )\n\n environ = dict(TEMBOARD_CONFIGFILE=__file__ + 'ne pas créer !')\n with pytest.raises(UserError):\n load_configuration(environ=environ)\n\n\ndef test_load_invalid_default(mocker):\n mocker.patch('temboardagent.configuration.MergedConfiguration.load_file')\n mocker.patch('temboardagent.configuration.load_plugins_configurations')\n # Bypass file validation\n mocker.patch('temboardagent.configuration.validators.file_', None)\n\n validator = mocker.Mock(side_effect=ValueError())\n\n from temboardagent.configuration import OptionSpec, load_configuration\n\n specs = [\n OptionSpec('section', 'name', default='invalid', validator=validator),\n ]\n\n with pytest.raises(ValueError):\n load_configuration(specs=specs, environ={})\n\n\ndef test_load_configparser():\n from temboardagent.configuration import (\n configparser,\n iter_configparser_values,\n )\n\n parser = configparser.RawConfigParser()\n parser.add_section('section0')\n parser.set('section0', 'option0', 'pouet')\n\n values = list(iter_configparser_values(parser, 'my.cfg'))\n\n assert 1 == len(values)\n assert 'pouet' == values[0].value\n assert 'section0_option0' == values[0].name\n assert 'my.cfg' == values[0].origin\n\n\ndef test_logging():\n from temboardagent.configuration import generate_logging_config, DotDict\n\n config = DotDict(dict(logging=dict(\n destination='pouet',\n facility='local0',\n method='stderr',\n level='DEBUG',\n )))\n\n 
generate_logging_config(**config)\n","sub_path":"test/unit/test_configuration.py","file_name":"test_configuration.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"65472542","text":"import pytest, sys, os,pymysql, docker, os.path\n\n#mysql-connectivity-test\ndef test_mysql_connectivity():\n db = pymysql.connect(host='localhost',user='root',passwd='toor')\n cursor = db.cursor()\n query = (\"SHOW DATABASES\")\n cursor.execute(query)\n for r in cursor:\n assert r != None\n\n#containers-test\ndef test_runningcontainers():\n client = docker.from_env()\n if client.containers.list(all=True):\n assert len(client.containers.list(all=True)) >= 2\n else:\n assert False\n\ndef test_env_isExists():\n assert os.path.isfile('.env')\n\n#yaml-tests\ndef test_yaml():\n dir_path = os.path.dirname(os.path.realpath(__file__))\n filepaths = [] \n for root, dirs, files in os.walk(dir_path):\n for file in files: \n if file.endswith('.yml'):\n filepaths.append(root+'/'+str(file))\n\n for fp in filepaths:\n # Split the extension from the path and normalise it to lowercase.\n ext = os.path.splitext(fp)[-1].lower()\n assert ext == \".yml\"","sub_path":"test/complet_uuid_test.py","file_name":"complet_uuid_test.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"233731664","text":"import Dominion\nimport unittest\nimport random\nfrom collections import defaultdict\nimport io\nimport sys\nimport pytest\n\n\n\"\"\"\n**************************************************************************************************************************************************************\nGlobally defining variables that will be used in the test suite\n**************************************************************************************************************************************************************\n\"\"\"\n#Get player names\nplayer_names = [\"Sonia\",\"*Owen\"]\n\n#number of curses and victory cards\nif len(player_names)>2:\n nV=12\nelse:\n nV=8\nnC = -10 + 10 * len(player_names)\n\n#Define box\nbox = {}\nbox[\"Woodcutter\"]=[Dominion.Woodcutter()]*10\nbox[\"Smithy\"]=[Dominion.Smithy()]*10\nbox[\"Laboratory\"]=[Dominion.Laboratory()]*10\nbox[\"Village\"]=[Dominion.Village()]*10\nbox[\"Festival\"]=[Dominion.Festival()]*10\nbox[\"Market\"]=[Dominion.Market()]*10\nbox[\"Chancellor\"]=[Dominion.Chancellor()]*10\nbox[\"Workshop\"]=[Dominion.Workshop()]*10\nbox[\"Moneylender\"]=[Dominion.Moneylender()]*10\nbox[\"Chapel\"]=[Dominion.Chapel()]*10\nbox[\"Cellar\"]=[Dominion.Cellar()]*10\nbox[\"Remodel\"]=[Dominion.Remodel()]*10\nbox[\"Adventurer\"]=[Dominion.Adventurer()]*10\nbox[\"Feast\"]=[Dominion.Feast()]*10\nbox[\"Mine\"]=[Dominion.Mine()]*10\nbox[\"Library\"]=[Dominion.Library()]*10\nbox[\"Gardens\"]=[Dominion.Gardens()]*nV\nbox[\"Moat\"]=[Dominion.Moat()]*10\nbox[\"Council Room\"]= [Dominion.CouncilRoom()] * 10\nbox[\"Witch\"]=[Dominion.Witch()]*10\nbox[\"Bureaucrat\"]=[Dominion.Bureaucrat()]*10\nbox[\"Militia\"]=[Dominion.Militia()]*10\nbox[\"Spy\"]=[Dominion.Spy()]*10\nbox[\"Thief\"]=[Dominion.Thief()]*10\nbox[\"Throne Room\"]= [Dominion.ThroneRoom()] * 10\n\nsupply_order = {0:['Curse','Copper'],2:['Estate','Cellar','Chapel','Moat'],\n 3:['Silver','Chancellor','Village','Woodcutter','Workshop'],\n 4:['Gardens','Bureaucrat','Feast','Militia','Moneylender','Remodel','Smithy','Spy','Thief','Throne Room'],\n 
5:['Duchy','Market','Council Room','Festival','Laboratory','Library','Mine','Witch'],\n 6:['Gold','Adventurer'],8:['Province']}\n\n#Pick 10 cards from box to be in the supply.\nboxlist = [k for k in box]\nrandom.shuffle(boxlist)\nrandom10 = boxlist[:10]\nsupply = defaultdict(list,[(k,box[k]) for k in random10])\n\n\n#The supply always has these cards\nsupply[\"Copper\"]=[Dominion.Copper()]*(60-len(player_names)*7)\nsupply[\"Silver\"]=[Dominion.Silver()]*40\nsupply[\"Gold\"]=[Dominion.Gold()]*30\nsupply[\"Estate\"]=[Dominion.Estate()]*nV\nsupply[\"Duchy\"]=[Dominion.Duchy()]*nV\nsupply[\"Province\"]=[Dominion.Province()]*nV\nsupply[\"Curse\"]=[Dominion.Curse()]*nC\n\n#initialize the trash\ntrash = []\n\n#function from Dominion.py in order to show the proper name of card in discard pile\ndef namesinlist(cardlist):\n namelist = [] \n for c in cardlist:\n namelist.append(c.name)\n return namelist\n\n#Construct the Player objects\nplayers = []\nfor name in player_names:\n if name[0]==\"*\":\n players.append(Dominion.ComputerPlayer(name[1:]))\n elif name[0]==\"^\":\n players.append(Dominion.TablePlayer(name[1:]))\n else:\n players.append(Dominion.Player(name))\n\n# for value in supply_order:\n# print (\"COST: \",value)\n# for stack in supply_order[value]:\n# if stack in supply:\n# print (stack, len(supply[stack]))\n# print(\"\\n\")\n\ninitial_hand = players[0].hand.copy()\n\n\"\"\"\n**************************************************************************************************************************************************************\nGlobal variables are defined\n**************************************************************************************************************************************************************\n\"\"\"\n\n\nclass TestTotalBuyPower(unittest.TestCase):\n def test_add_gold(self):\n #print(\"totalbuypower test_add_gold...\")\n #print(players[0].hand)\n bp1 = Dominion.totalbuypower(players[0].hand)\n #print(bp1)\n players[0].hand.append(supply[\"Gold\"][0])\n #print(players[0].hand)\n bp2 = Dominion.totalbuypower(players[0].hand)\n #print(bp2)\n self.assertEqual(bp1 + 3, bp2)\n\n def test_add_festival(self):\n #print(\"totalbuypower test_add_festival...\")\n #print(players[0].hand)\n bp1 = Dominion.totalbuypower(players[0].hand)\n #print(bp1)\n supply[\"Festival\"]=[Dominion.Festival()]*1\n players[0].hand.append(supply['Festival'][0])\n #print(players[0].hand)\n bp2 = Dominion.totalbuypower(players[0].hand)\n #print(bp2)\n self.assertEqual(bp1 + 2, bp2)\n\n def test_remove_cards(self):\n #print(\"totalbuypower test_remove_cards...\")\n #print(players[0].hand)\n bp1 = Dominion.totalbuypower(players[0].hand)\n #print(bp1)\n temp_hand = players[0].hand.copy()\n players[0].hand = []\n #print(players[0].hand)\n bp2 = Dominion.totalbuypower(players[0].hand)\n #print(bp2)\n self.assertNotEqual(bp1, 0)\n self.assertEqual(0, bp2)\n players[0].hand = temp_hand\n\n def test_basic_count(self):\n #print(\"totalbuypower test_basic_count...\")\n #print(players[0].hand)\n bp1 = 0\n for card in initial_hand:\n if card.name == \"Copper\":\n bp1+=1\n #print(bp1)\n bp2 = Dominion.totalbuypower(initial_hand)\n #print(bp2)\n self.assertEqual(bp1, bp2)\n\n def test_add_province(self):\n #print(\"totalbuypower test_add_province...\")\n #print(players[0].hand)\n bp1 = Dominion.totalbuypower(players[0].hand)\n #print(bp1)\n players[0].hand.append(supply[\"Province\"][0])\n #print(players[0].hand)\n bp2 = Dominion.totalbuypower(players[0].hand)\n #print(bp2)\n self.assertEqual(bp1, bp2)\n\nclass 
TestWitchPlay(unittest.TestCase):\n Dominion.Witch().play(players[0], players, supply, trash)\n #print(\"initial VP: \", players[1].calcpoints())\n def test_place_curse(self):\n #print(\"play test_place_curse...\")\n for player in players:\n #print(player.discard)\n if player is not players[0]:\n self.assertEqual(len(player.discard), 1)\n\n def test_self_curse(self):\n #print(\"play test_place_curse...\")\n #print(players[0].discard)\n self.assertEqual(len(players[0].discard), 0)\n\n def test_new_vp(self):\n #print(\"play test_new_vp...\")\n #print(\"cursed VP: \", players[1].calcpoints())\n self.assertGreater(players[0].calcpoints(), players[1].calcpoints())\n\n def test_no_curse(self):\n #print(\"play test_no_curse...\")\n before_length = len(players[1].discard)\n #print(players[1].discard)\n while len(supply[\"Curse\"]) > 0:\n supply[\"Curse\"].pop()\n Dominion.Witch().play(players[0], players, supply, trash)\n #print(players[1].discard)\n after_length = len(players[1].discard)\n self.assertEqual(before_length, after_length)\n\n def test_protect(self):\n #print(\"play test_protect...\")\n dis1 = len(players[1].discard)\n #print(players[1].discard)\n supply[\"Curse\"] = [Dominion.Curse()] * 1\n supply[\"Moat\"] = [Dominion.Moat()] * 1\n players[1].hand.append(supply['Moat'][0])\n Dominion.Witch().play(players[0], players, supply, trash)\n #print(players[1].discard)\n dis2 = len(players[1].discard)\n self.assertEqual(dis1, dis2)\n\n\nif __name__ == '__main__':\n print(\"=====\\nTesting\\n=====\")\n unittest.main()\n\n\n\n\n","sub_path":"owenTestFile.py","file_name":"owenTestFile.py","file_ext":"py","file_size_in_byte":7587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"377331118","text":"import socket\nfrom SocketWrapper import SocketRFIDLog\nfrom Parsing import *\nfrom IO_Azure import *\n\ndef connection(address, port):\n isConnected = False\n while not isConnected:\n sock = socket.socket()\n #sock.settimeout(5)\n try:\n sock.connect((address, port))\n except socket.timeout:\n print(\"Sock: ({},{}) not connected\".format(address, port))\n continue\n\n isConnected = True\n\n mySocket = SocketRFIDLog(sock, address, port)\n print(\"Sock: ({},{}) connected\".format(address, port))\n return mySocket\n\n\nif __name__ == \"__main__\":\n address = \"192.168.101.70\"\n port = 10001\n\n freshStart = True # skip first sentence to make sure we start reading the sentence from the beginning\n connected = False\n while True:\n if not connected:\n mySocket = connection(address, port)\n freshStart = True\n\n connected = True\n mySocket.dict, doc_link = readAzureRFIDLogDict()\n interrupted = extractRFIDLog(mySocket, freshStart)\n freshStart = False\n if interrupted:\n connected = False\n replaceAzureDict(mySocket.dict, doc_link)\n","sub_path":"main-rfid-log.py","file_name":"main-rfid-log.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"374372104","text":"# -*- coding: utf-8 -*-\nimport os\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import inch, cm\nfrom reportlab.lib.pagesizes import A4,landscape\nfrom reportlab.platypus import Image\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nimport datetime\n\nfrom django.conf import settings\n\n\nCURRENT_DIR = os.path.realpath(os.path.dirname(__file__))\nFONT_FILE = os.path.join(CURRENT_DIR, 'Arial.ttf')\npdfmetrics.registerFont(TTFont(\"Arial\", 
FONT_FILE)) \n\n### Variables ###\n\n\nwidth,height=landscape(A4) # width and height of A4 Landscape format\n\nmarginX,marginY=0,0\n\ntitleX,titleY=3.57*cm+100,9.95*cm+100\n\nnameX,nameY=2.17*cm+100,7.71*cm+100\n\nmainContentX,mainContentY=2.17*cm+100,7.04*cm+100\n\ndateX,dateY=2.17*cm+100,3.74*cm+100\n\nlegalMentionsX,legalMentionsY=2.17*cm+100,0.8*cm+100\n\nlogoX,logoY=2.17*cm+100,11.6*cm+100\n\nlogoOrganizationX,logoOrganizationY=18*cm+100,11.6*cm+100\n\nlistProfessorsX,listProfessorsY=width-150,height-190\n\nURLFunX,URLFunY=100+7.57*cm,2.25*cm+100\n\nMoocX,MoocY=2.17*cm+100,1.47*cm+100\n\n#################\n\nclass CertificateInfo(object):\n full_name = \"\"\n course_name = \"\"\n organization = \"\"\n organization_logo = \"\"\n pdf_file_name = \"\"\n teachers = []\n organizationNameTooLong=False\n courseNameTooLong=False\n\n\n def generate(self):\n c=canvas.Canvas(self.pdf_file_name,pagesize=landscape(A4))\n\n\n if len(self.organization)>33:\n self.organizationNameTooLong=True\n\n if len(self.course_name)>33:\n self.courseNameTooLong=True\n\n #border\n c.setStrokeColorRGB(221./256,221./256,221./256)\n c.setLineWidth(cm*0.13)\n c.rect(100,100,cm*23.84,cm*15)\n\n\n #title\n textobject=c.beginText()\n textobject.setTextOrigin(titleX,titleY)\n textobject.setFont(\"Arial\",24)\n textobject.setFillColorRGB(59./256,118./256,188./256)\n textobject.textLine(u\" ATTESTATION DE SUIVI AVEC SUCCÈS\")\n c.drawText(textobject) \n\n c.setFillColorRGB(221./256,221./256,221./256)\n if (self.organizationNameTooLong):\n if (self.courseNameTooLong):\n c.rect(100,2.43*cm+100,400,6.41*cm,fill=1,stroke=0)\n c.rect(cm*22.6+100,2.43*cm+100,1.24*cm,6.41*cm,fill=1,stroke=0)\n else:\n c.rect(100,2.73*cm+100,400,6.11*cm,fill=1,stroke=0)\n c.rect(cm*22.6+100,2.73*cm+100,1.24*cm,6.11*cm,fill=1,stroke=0)\n else:\n c.rect(100,3.23*cm+100,400,5.61*cm,fill=1,stroke=0)\n c.rect(cm*22.6+100,3.23*cm+100,1.24*cm,5.61*cm,fill=1,stroke=0)\n\n\n #Name\n textobject=c.beginText()\n textobject.setTextOrigin(nameX,nameY)\n textobject.setFont(\"Arial\",24)\n textobject.setFillColorRGB(0,0,0)\n textobject.textLine(self.full_name)\n c.drawText(textobject)\n\n\n #Main Content\n textobject=c.beginText()\n textobject.setTextOrigin(mainContentX,mainContentY)\n textobject.setFont(\"Arial\",16)\n textobject.setFillColorRGB(127./256,127./256,127./256)\n textobject.textOut(u\"a suivi avec succès le MOOC\")\n textobject.setFillColorRGB(59./256,118./256,188./256)\n textobject.textLine(\"*\")\n\n\n textobject.setFillColorRGB(0,0,0)\n textobject.moveCursor(0,10)\n if (self.courseNameTooLong):\n indexReturnLine=self.course_name.rfind(\" \",0,43)\n textobject.textLine(self.course_name[:indexReturnLine])\n textobject.textLine(self.course_name[indexReturnLine+1:])\n else:\n textobject.textLine(self.course_name)\n\n textobject.setFillColorRGB(127./256,127./256,127./256)\n if (self.organizationNameTooLong):\n indexReturnLine=self.organization.rfind(\" \",0,33)\n textobject.textOut(u\"proposé par \")\n textobject.setFillColorRGB(0,0,0)\n textobject.textLine(self.organization[:indexReturnLine])\n textobject.textLine(self.organization[indexReturnLine+1:])\n else:\n textobject.textOut(u\"proposé par \")\n textobject.setFillColorRGB(0,0,0)\n textobject.textLine(self.organization)\n textobject.setFillColorRGB(0,0,0)\n\n textobject.setFillColorRGB(127./256,127./256,127./256)\n textobject.textOut(u\"et diffusé sur la \")\n textobject.setFillColorRGB(0,0,0)\n textobject.textLine(u\"plateforme FUN\")\n textobject.setFillColorRGB(59./256,118./256,188./256)\n 
textobject.textLine(datetime.date.today().strftime('Le %d/%m/%Y'))\n c.drawText(textobject)\n\n\n #legal mentions\n textobject=c.beginText()\n textobject.setTextOrigin(legalMentionsX,legalMentionsY)\n textobject.setFont(\"Arial\",8.5)\n textobject.setFillColorRGB(0,0,0)\n legalMentions=u\"La présente attestation n’est pas un diplôme et ne confère pas de crédits (ECTS). Elle n'atteste pas que le participant était inscrit à/au {}.\".format(self.organization)\n if (len(legalMentions)>169):\n indexReturnLine=legalMentions.rfind(\" \",0,169)\n textobject.textLine(legalMentions[:indexReturnLine])\n textobject.textOut(legalMentions[indexReturnLine+1:])\n textobject.textLine(u\". L’identité du participant n’a pas été vérifiée.\")\n else:\n textobject.textLine(u\"La présente attestation n’est pas un diplôme et ne confère pas de crédits (ECTS). Elle n'atteste pas que le participant était inscrit à/au {}.\".format(self.organization))\n textobject.textLine(u\"L’identité du participant n’a pas été vérifiée.\")\n c.drawText(textobject)\n \n #logo of FUN\n c.drawImage(settings.FUN_LOGO_PATH,logoX,logoY,width=83,height=77,mask='auto')\n\n\n #logo of the organization\n if (self.organization_logo):\n c.drawImage(self.organization_logo,logoOrganizationX,logoOrganizationY,width=140,height=80,mask='auto')\n\n\n #List of professors\n c.setFont(\"Arial\",16)\n c.setFillColorRGB(59./256,118./256,188./256)\n c.drawRightString(100+21.70*cm,100+8.40*cm,\"Les enseignants\")\n y=7.5\n c.setFillColorRGB(0,0,0)\n c.setFont(\"Arial\",12)\n for teacher in self.teachers:\n teacherNameAndJob=teacher.split(\"/\")\n teacherName=teacherNameAndJob[0]\n teacherJob=teacherNameAndJob[1]\n c.setFillColorRGB(0,0,0)\n c.drawRightString(100+21.70*cm,100+y*cm,teacherName)\n c.setFillColorRGB(127./256,127./256,127./256)\n c.drawRightString(100+21.70*cm,100+(y-0.53)*cm,teacherJob)\n y=y-1.24\n\n\n\n #URL Fun\n textobject=c.beginText()\n if (self.organizationNameTooLong):\n textobject.setTextOrigin(URLFunX,URLFunY-10)\n else:\n textobject.setTextOrigin(URLFunX,URLFunY)\n textobject.setFont(\"Arial\",12)\n textobject.setFillColorRGB(59./256,118./256,188./256)\n textobject.textLine(u\"http://www.france-universite-numerique-mooc.fr\")\n c.drawText(textobject) \n\n\n\t #Mooc : cours en ligne\n textobject=c.beginText()\n textobject.setTextOrigin(MoocX,MoocY)\n textobject.setFont(\"Arial\",8)\n textobject.setFillColorRGB(59./256,118./256,188./256)\n textobject.textOut(\"* \")\n textobject.setFillColorRGB(127./256,127./256,127./256)\n textobject.textLine(u\"MOOC : cours en ligne\")\n c.drawText(textobject) \t \n\n\n\n c.showPage()\n c.save()\n\n return True\n\n","sub_path":"fun_certificates/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":7548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"89117290","text":"import parallelstrategies as ps\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef dist(w, p):\n min = 999\n for spot_xy in p.spots_xy:\n d = np.sqrt((w.x-spot_xy[0])**2 + (w.y-spot_xy[1])**2)\n if d < min:\n min = d\n return min\n\ndef runDual(duration_mins):\n w = ps.Worm(dt,duration_steps,pir_window)\n p = ps.Plate()\n d_hist = np.zeros(duration_steps)\n inside = 0\n for step in range(duration_steps):\n w.step_rc()\n w.step_wv(p.conc(w.sx[0],w.sy[0])-p.conc(w.sx[1],w.sy[1]))\n w.step_pr(step,p.conc(w.x,w.y))\n w.step()\n p.step(dt)\n if np.sqrt(w.x**2 + w.y**2) > p.petri_radius:\n w.bounce(p.petri_radius)\n d_hist[step] = dist(w,p)\n if 
d_hist[step] < p.CI_radial_radius:\n inside += 1\n outside = duration_steps - inside\n ci = (inside - outside)/duration_steps\n return d_hist,ci\n\nreps = 5000\npir_window = 0.6\nduration_mins = 20\ndt = 0.6\nduration_steps = int((duration_mins * 60) / dt)\nd = np.zeros((reps,duration_steps))\nci = np.zeros(reps)\nfor i in range(reps):\n d[i],ci[i] = runDual(duration_mins)\nnp.savetxt('exp1A2_R_d.csv', d, delimiter=',')\nnp.savetxt('exp1A2_R_ci.csv', ci, delimiter=',')\n","sub_path":"src/exp1C_R.py","file_name":"exp1C_R.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"112866847","text":"#!/usr/bin/env python3\n# -+-coding: utf-8 -+-\n\n\"\"\"\n\"\"\"\n\n#--------------------------------------------\n# Authors: Frank Boers \n#\n#-------------------------------------------- \n# Date: 30.04.19\n#-------------------------------------------- \n# License: BSD (3-clause)\n#--------------------------------------------\n# Updates\n#--------------------------------------------\n\"\"\"\npreproc functions:\n apply_noise_reducer call noise_reducer\n apply_suggest_bads call suggest_bads\n apply_interpolate_bads call interpolate_bads\n\n\"\"\"\nimport sys,os,logging,yaml,argparse,glob\n#from contextlib import redirect_stdout\n\nimport mne\n\nfrom jumeg.base.jumeg_base import jumeg_base as jb\nfrom jumeg.base import jumeg_logger # import STDStreamLogger # capture stout,stderr\nfrom jumeg.plot.jumeg_plot_preproc import JuMEG_PLOT_PSD\nfrom jumeg.base.pipelines.jumeg_pipelines_utils_base import get_args\n#--- preproc\nfrom jumeg.jumeg_noise_reducer import noise_reducer\nfrom jumeg.jumeg_suggest_bads import suggest_bads\nfrom jumeg.jumeg_interpolate_bads import interpolate_bads as jumeg_interpolate_bads\n\nlogger = logging.getLogger(\"jumeg\")\n\n__version__= \"2019.05.10.001\"\n\n#---------------------------------------------------\n#--- apply_noise_reducer\n#---------------------------------------------------\ndef apply_noise_reducer(raw_fname,raw=None,**cfg):\n '''\n apply thrice to reference channels with different freq parameters\n save PSD plot in subfolder /plots\n \n !!! 
overwrite raw-obj, works inplace !!!\n \n 0) reset bads and check for dead channels\n 1) apply nr low pass filter for freq below e.g.: 5Hz \n 2) apply nr high pass filter if defined \n 3) apply nr notch filter to remove power line noise \n 4) save PSD plot\n \n Parameter:\n -----------\n parameter used in this function :\n fname_raw : input raw filename\n raw : \\n\n cfg : dict, part of config file \n from config file part\\n\n reflp : \\n\n refhp : \\n\n refnotch : \\n\n \n plot: True\n plot_show : True\n plot_dir : subdir to save plots\n \n postfix : \"nr\"\n file_extention: [\"meeg-raw.fif\",\"rfDC-empty.fif\"]\n \n run : True\n save : True\n overwrite: True\n \n ToDo add parameter extended\n parameter extended\n signals=[], noiseref=[], detrending=None,\n tmin=None, tmax=None,\n exclude_artifacts=True, checkresults=True, return_raw=False,\n complementary_signal=False, fnout=None, verbose=False\n\n Return:\n --------\n filename,raw-obj\n '''\n \n \n #--- init plot\n fname_out = None\n logger.info(\" -> apply_noise_reducer file name: {}\".format(raw_fname))\n logger.debug(\" -> config parameter:\\n{}\".format( cfg ))\n \n if not jb.check_file_extention(fname=raw_fname,file_extention=cfg.get(\"file_extention\") ):\n return\n\n if not cfg.get(\"run\"):\n #--- return raw_fname,raw\n return jb.update_and_save_raw(raw,fin=raw_fname,fout=None,save=False,\n postfix = cfg.get(\"postfix\",\"nr\"),overwrite = cfg.get(\"overwrite\",True))\n \n \n logger.info(\" --> preproc noise_reducer for raw file: {}\".format(raw_fname))\n \n #--- noise reduction\n # apply noise reducer thrice to reference channels with different freq parameters\n # !!! overwrite raw-obj !!!\n save = False\n raw_changed = False\n jb.verbose = cfg.get(\"verbose\")\n \n #--- load raw, reset bads\n raw,raw_fname = jb.get_raw_obj(raw_fname,raw=raw,reset_bads=True)\n \n #--- check dead channels and mark them as bad\n jb.picks.check_dead_channels(raw=raw)\n \n #--- start plot denoising orig raw psd, avoid reloading raw data\n if cfg.get(\"plot\"):\n jplt = JuMEG_PLOT_PSD(n_plots=2,name=\"denoising\",verbose=True)\n jplt.plot(raw,title=\"orig: \" + os.path.basename(raw_fname),check_dead_channels=False)\n \n #--- with redirect stdout/err\n with jumeg_logger.StreamLoggerSTD(label=\"noise_reducer\"):\n #--- 1 nr low pass filter for freq below 5 hz\n if cfg.get(\"reflp\"):\n raw = noise_reducer(None,raw=raw,reflp=cfg.get(\"reflp\"),return_raw=True,verbose=cfg.get(\"verbose\"),exclude_artifacts=False)\n raw_changed = True\n #--- 2 nr high pass filter\n if cfg.get(\"refhp\"):\n raw = noise_reducer(None,raw=raw,refhp=cfg.get(\"refhp\"),return_raw=True,verbose=cfg.get(\"verbose\"),exclude_artifacts=False)\n raw_changed = True\n #--- 3 nr notch filter to remove power line noise\n if cfg.get(\"refnotch\"):\n raw = noise_reducer(None,raw=raw,refnotch=cfg.get(\"refnotch\"),fnout=None,return_raw=True,verbose=cfg.get(\"verbose\"),exclude_artifacts=False)\n raw_changed = True\n \n \n #--- save and update filename in raw\n if cfg.get(\"save\"):\n save = raw_changed\n \n #--- update filename in raw and save if save\n fname_out,raw = jb.update_and_save_raw(raw,fin=raw_fname,fout=None,save=save,update_raw_filenname=True,\n postfix=cfg.get(\"postfix\",\"nr\"),overwrite=cfg.get(\"overwrite\",True))\n \n #--- plot results, avoid reloading raw data\n if cfg.get(\"plot\"):\n jplt.plot(raw,title=\"denoised: \"+os.path.basename(fname_out),check_dead_channels=False)\n if cfg.get(\"plot_show\"):\n jplt.show()\n 
jplt.save(fname=fname_out,plot_dir=cfg.get(\"plot_dir\",\"plots\"))\n \n if fname_out:\n return fname_out,raw\n else:\n raise Exception(\"---> ERROR file name not defined !!!\")\n\n#---------------------------------------------------\n#--- apply_suggest_bads\n#---------------------------------------------------\ndef apply_suggest_bads(raw_fname,raw=None,**cfg):\n \"\"\"\n\n :param raw_fname:\n :param raw:\n :param cfg:\n :return:\n filename,raw-obj\n \"\"\"\n fname_out = None\n logger.info(\" -> apply_suggest_bads file name: {}\".format(raw_fname))\n logger.debug(\" -> config parameter:\\n{}\".format(cfg))\n \n \n if not jb.check_file_extention(fname=raw_fname,file_extention=cfg.get(\"file_extention\")):\n return\n \n #--- return raw_fname,raw\n if not cfg.get(\"run\"):\n return jb.update_and_save_raw(raw,fin=raw_fname,fout=None,save=False,\n postfix=cfg.get(\"postfix\",\"bcc\"),overwrite=cfg.get(\"overwrite\",True))\n \n raw_changed = True\n jb.verbose = cfg.get(\"verbose\")\n raw,raw_fname = jb.get_raw_obj(raw_fname,raw=raw)\n \n if raw:\n with jumeg_logger.StreamLoggerSTD(label=\"suggest_bads\"):\n marked,raw = suggest_bads(raw) #,**cfg[\"parameter\"]) #show_raw=cfg.get(\"show_raw\") )\n\n fname_out,raw = jb.update_and_save_raw(raw,fin=raw_fname,fout=None,save=cfg.get(\"save\"),update_raw_filenname=True,\n postfix=cfg.get(\"postfix\",\"bcc\"),overwrite=cfg.get(\"overwrite\",True))\n \n if fname_out:\n return fname_out,raw\n else:\n raise Exception( \"---> ERROR file name not defined !!!\" )\n \n\n#---------------------------------------------------\n#--- apply_interpolate_bads\n#---------------------------------------------------\ndef apply_interpolate_bads(raw_fname,raw=None,**cfg):\n \"\"\"\n\n :param raw_fname:\n :param raw:\n :param cfg:\n :return:\n filename,raw-obj\n \"\"\"\n fname_out = None\n logger.info(\" -> apply_interpolate_bads file name: {}\".format(raw_fname))\n logger.debug(\" -> config parameter:\\n{}\".format(cfg))\n jb.verbose = cfg.get(\"verbose\")\n \n if not jb.check_file_extention(fname=raw_fname,file_extention=cfg.get(\"file_extention\")):\n return\n #--- return raw_fname,raw\n if not cfg.get(\"run\"):\n return jb.update_and_save_raw(raw,fin=raw_fname,fout=None,save=False,update_raw_filenname=True,\n postfix=cfg.get(\"postfix\",\"int\"),overwrite=cfg.get(\"overwrite\",True))\n\n raw,raw_fname = jb.get_raw_obj(raw_fname,raw=raw)\n \n if raw:\n logger.info(\"fname: {}\".format(raw_fname) )\n #--- Interpolate bad channels using jumeg\n with jumeg_logger.StreamLoggerSTD(label=\"interpolate_bads\"):\n raw = jumeg_interpolate_bads(raw) #,**cfg.get(\"parameter\")) #,origin=cfg.get(\"origin\",None),reset_bads=cfg.get(\"reset_bads\",True) )\n \n #-- check results\n if cfg.get(\"plot_block\"):\n raw.plot(block=cfg.get(\"plot_block\"))\n \n #--- update filename in raw and save if save\n fname_out,raw = jb.update_and_save_raw(raw,fin=raw_fname,fout=None,save=cfg.get(\"save\"),\n postfix=cfg.get(\"postfix\",\"int\"),overwrite=cfg.get(\"overwrite\",True))\n \n if fname_out:\n return fname_out,raw\n else:\n raise Exception( \"---> ERROR file name not defined !!!\" )\n \n \n\n#---------------------------------------------------\n#--- apply_filter\n#---------------------------------------------------\ndef apply_filter(raw_fname,raw=None,**cfg):\n \"\"\"\n\n :param raw_fname:\n :param raw:\n :param cfg:\n :return:\n filename,raw-obj\n \"\"\"\n return\n logger.info(\" -> apply_filter file name: {}\".format(raw_fname))\n logger.debug(\" -> config parameter:\\n{}\".format(cfg))\n\n if 
not jb.check_file_extention(fname=raw_fname,file_extention=cfg.get(\"file_extention\")):\n return\n #--- return raw_fname,raw\n if not cfg.get(\"run\"):\n return jb.update_and_save_raw(raw,fin=raw_fname,fout=None,save=False,\n postfix=cfg.get(\"postfix\",\"fi\"),overwrite=cfg.get(\"overwrite\",True))\n\n #--- catch stdout,stderr\n #jumeg_logger.log_stdout(label=\"filter\")\n #jumeg_logger.log_stderr(label=\"filter\")\n\n jb.verbose = cfg.get(\"verbose\")\n raw,raw_fname = jb.get_raw_obj(raw_fname,raw=raw)\n \n #--- ToDo setup mne filter as jumeg CLS\n #raw,raw_fname = jumeg_mne_fileter(raw)\n raw_changed=True\n \n #--- save and update filename in raw\n #if raw_changed:\n # fname_out,raw = jb.update_and_save_raw(raw,fin=raw_fname,fout=None,save=cfg.get(\"save\"),update_raw_filenname=True,\n # postfix=cfg.get(\"postfix\",\"bcc\"),overwrite=cfg.get(\"overwrite\",True))\n\n #--- return back stdout/stderr from logger\n #jumeg_logger.log_stdout(reset=True)\n #jumeg_logger.log_stderr(reset=True)\n\n return fname_out,raw\n\n\n#---------------------------------------------------\n#--- apply_resample\n#---------------------------------------------------\ndef apply_resample(raw_fname,raw=None,**cfg):\n \"\"\"\n\n :param raw_fname:\n :param raw:\n :param cfg:\n :return:\n filename,raw-obj\n \"\"\"\n return\n logger.info(\" -> apply_resample file name: {}\".format(raw_fname))\n logger.debug(\" -> config parameter:\\n{}\".format(cfg))\n \n if not jb.check_file_extention(fname=raw_fname,file_extention=cfg.get(\"file_extention\")):\n return\n #--- return raw_fname,raw\n if not cfg.get(\"run\"):\n return jb.update_and_save_raw(raw,fin=raw_fname,fout=None,save=False,\n postfix=cfg.get(\"postfix\",\"res\"),overwrite=cfg.get(\"overwrite\",True))\n\n #--- catch stdout,stderr\n #jumeg_logger.log_stdout(label=\"filter\")\n #jumeg_logger.log_stderr(label=\"filter\")\n\n jb.verbose = cfg.get(\"verbose\")\n raw,raw_fname = jb.get_raw_obj(raw_fname,raw=raw)\n \n #--- ToDo setup resampling\n #raw,raw_fname = jumeg_mne_fileter(raw)\n raw_changed=True\n \n #--- save and update filename in raw\n #if raw_changed:\n # fname_out,raw = jb.update_and_save_raw(raw,fin=raw_fname,fout=None,save=cfg.get(\"save\"),update_raw_filenname=True,\n # postfix=cfg.get(\"postfix\",\"bcc\"),overwrite=cfg.get(\"overwrite\",True))\n\n #--- return back stdout/stderr from logger\n #jumeg_logger.log_stdout(reset=True)\n #jumeg_logger.log_stderr(reset=True)\n\n return fname_out,raw\n\n\n","sub_path":"jumeg/base/pipelines/jumeg_pipelines_utils0.py","file_name":"jumeg_pipelines_utils0.py","file_ext":"py","file_size_in_byte":12152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"51934430","text":"# -*- coding: utf-8 -*-\n\n\nimport time\nimport tsp_util\n\nt0 = time.time()\nfile = open('qa194.tsp','r').read().splitlines()[7:-1]\n\nlist = []\nfor item in file:\n num, x, y = item.split(\" \")\n list.append([float(x), float(y)]) # int tuple list\n\npoint = list.pop(0) # pop out the first item in the list\n\npath, sum = tsp_util.findpath(point, list)\nt1 = time.time()\ntotal = t1 - t0\n\nprint(\"Optimal route:\", path)\nprint(\"Length:\", sum)\nprint(\"Time Take: %.3fs\" %total)\n\n# graph it\ntsp_util.graphplot(path)","sub_path":"greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"343513710","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib 
import admin\nfrom russian.exercise import views\nfrom django.views.generic import TemplateView\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'russian.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^exercise/(?P\\d+)/checker/$', views.checker, name='checker'),\n url(r'^exercise/(?P\\d+)/$', views.exercise, name='exercise'),\n url(r'^captcha/', include('captcha.urls')),\n url(r'^accounts/', include('allauth.urls')),\n url(r'^accounts/profile/$', views.profile, name='profile'),\n url(r'^exercises_list/$', views.exercises_list, name='exercises_list'),\n url(r'^rating/$', views.rating, name='rating'),\n url(r'^faq/$', views.faq, name='faq'),\n url(r'^feedback/$', views.feedback, name='feedback'),\n url(r'^$', views.index, name='index'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^comments/', include('django_comments.urls')),\n url(r'^robots\\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),\n)\n","sub_path":"russian/russian/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"12622481","text":"\n# coding: utf-8\n\n# In[21]:\n\n\nimport pandas as pd\nimport subprocess \nimport datetime \n\nload_new = False\ntry:\n data = pd.read_csv('data.csv')\nexcept FileNotFoundError:\n load_new = True\n\ndata['date'] = pd.to_datetime(data['date'], format='%Y-%m-%d') #, data.dtypes\n\nif datetime.datetime.now().date() - data.date.max().date() > datetime.timedelta(days = 1):\n load_new = True\n\nif load_new:\n print('Download data:')\n subprocess.call(['wget', '-O', 'data.csv', 'http://cowid.netlify.com/data/full_data.csv'])\n\ndata = pd.read_csv('data.csv')\ndata['date'] = pd.to_datetime(data['date'], format='%Y-%m-%d') #, data.dtypes\nprint('Latest date: {}'.format(data.date.max().date()))\ndata.head()\n\n\n# In[22]:\n\n\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nimport datetime \nimport numpy as np\n\ndef set_ticks(times, ax):\n tickevery = 4\n #ticks = list(map(lambda x: x.strftime(\"%d %b\"), times))[::tickevery]\n \n ticks = np.datetime_as_string(times, unit='D')[::tickevery]\n \n ax.set_xticks(times[::tickevery])\n ax.set_xticklabels(ticks, rotation = 70, ha=\"right\")\n \ndef plot_bar_datetime(times, y, ax, label = None, align = 'right', color = None):\n \n if align == 'right':\n times = times + np.timedelta64(12, 'h')\n \n ax.bar(times, y, width = .4, label = label, color = color) #, align = align)\n \n\ndef fit_model(times, y_tot, first_case_date):\n \n \n exp_f_ = lambda t, N, a: N*np.exp(a*t)/(N + np.exp(a*t) - 1)\n #def exp_f_(times_, R, N):\n # return np.exp(times_/R)*(N - y.cumsum())/N \n \n times_ = (times - first_case_date)/np.timedelta64(1, 'D') # - np.timedelta64(1, 'D')\n \n N, a = curve_fit(exp_f_, times_, y_tot, [1000, 1])[0]\n \n \n model_totcases = exp_f_(times_, N, a) #model_newcases.cumsum()\n model_newcases = np.insert(np.diff(model_totcases), 0, 0)\n return model_newcases, model_totcases, N, a\n \n \ndef plot_line_datetime(times, y, ax, label = None, color = None):\n \n ax.plot(times, y, label = label, c = color)\n \n\ndef plot_fig1(mask, title, ax):\n times = data.loc[mask, 'date'].values.astype('datetime64[D]')\n y_new = data.loc[mask, 'new_cases'].values\n y_tot = data.loc[mask, 'total_cases'].values\n\n ax2 = ax.twinx()\n y_new = np.nan_to_num(y_new)\n\n plot_bar_datetime(times, y_new, ax2, 'new_cases', align = 'right', color = 'C0')\n 
plot_bar_datetime(times, y_tot, ax, 'tot_cases', align = 'left', color = 'C1')\n \n try:\n y_fit_newcases, y_fit_totcases, N, a = fit_model(times, y_tot, times[y_new != 0][0])\n except RuntimeError:\n return\n \n label = r'fit: y = N*e^(a*t)/(N + e^(a*t) - 1)' + '\\na={:.2f} \\nN={:.0f}'.format(a, N)\n plot_line_datetime(times, y_fit_newcases, ax2, None, color = 'C0')\n plot_line_datetime(times, y_fit_totcases, ax, label, color = 'C1')\n \n if title is not None: ax.set_title(title)\n if label is not None: \n ax2.legend(frameon = False, loc = 3)\n ax.legend(frameon = False, loc = 2)\n ax.set_ylabel('total # cases')\n ax2.set_ylabel('# new cases')\n\n\n\n# select countries:\nminncases = 30\nremove_countries = ['International', 'China']\ncountries = []\nfor country in data.location.unique():\n if not data.loc[data.loc[:, 'location'] == country, 'total_cases'].max() > minncases:\n continue\n if country in remove_countries:\n continue\n \n countries.append(country)\n\n#countries = ['South Korea']\nncols = 2\nnrows = len(countries)//ncols + 1\n\nfigW, figH = ncols * 8, nrows*4\nfig, axarr = plt.subplots(nrows, ncols, figsize = (figW, figH), sharex = True)\n\nfor country, ax in zip(countries, axarr.ravel()):\n mask = data.location == country\n plot_fig1(mask, country, ax)\n\ntimes = data.date.unique()\nfor i in range(len(axarr[0])): set_ticks(times, axarr[-1, i])\n \n\nplt.tight_layout()\nplt.savefig('countries.pdf', facecolor=fig.get_facecolor(), transparent=True)\nplt.savefig('countries.png', dpi = 200, facecolor=fig.get_facecolor(), transparent=True)\nplt.show()\n\n\n# ### Solve a spread model:\n\n# In[55]:\n\n\nfrom sympy import *\n\nt, a, N, C1 = symbols(r't a N C_1', real = True)\n# y = total number of infected (t)\ny = Function('y')(t)\ny_ = Derivative(y, t)\n\n\neq = dsolve(a*y*(1-y/N) - y_, y, ics = {y.subs(t, 0):1})\neq\n\n\n# #### use condition: y(0) == 1\n\n# In[56]:\n\n\n\nyy = (-N/(exp(C1*N - a*t) - 1)).subs(C1, ln(1-N)/N)\n\n(yy.diff(t) - a*yy*(1-yy/N)).simplify()\nyy.simplify()\n\n\n# #### Sanity check:\n\n# In[58]:\n\n\nN = 100\na = .1\n\ny_ = lambda t, N, a: N*np.exp(a*t)/(N + np.exp(a*t) - 1)\n\nt = np.linspace(0,80,100)\n\nplt.plot(t, y_(t, N, a))\nplt.show()\n\n","sub_path":"explr_covid_data.py","file_name":"explr_covid_data.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"227625231","text":"import os\nimport sys\nimport time\nimport math\nimport yaml\nimport torch\nimport argparse\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(BASE_DIR, 'data'))\nsys.path.append(os.path.join(BASE_DIR, 'model'))\n\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='PointNet')\n parser.add_argument('--work_dir', default='work_dir/temp')\n parser.add_argument('--config', default='config/shapenet_cstnet.yaml')\n\n parser.add_argument('--phase', default='train')\n parser.add_argument('--log_interval', type=int, default=10)\n parser.add_argument('--save_interval', type=int, default=1)\n parser.add_argument('--eval_interval', type=int, default=1)\n\n parser.add_argument('--feeder', default='data.Dataset')\n parser.add_argument('--data_path', default='/input/shapenetcore_partanno_segmentation_benchmark_v0')\n parser.add_argument('--class_choice', default='Chair')\n 
parser.add_argument('--npoints', type=int, default=2500)\n parser.add_argument('--num_workers', type=int, default=8)\n\n parser.add_argument('--model', default='model.PointNetSeg')\n parser.add_argument('--weights', default=None)\n\n parser.add_argument('--base_lr', type=float, default=0.01)\n parser.add_argument('--base_momentum', type=float, default=0.9)\n parser.add_argument('--num_epoch', type=int, default=2)\n parser.add_argument('--device', default=0, nargs='+')\n parser.add_argument('--nesterov', type=str2bool, default=True)\n parser.add_argument('--batch_size', type=int, default=64)\n parser.add_argument('--weight_decay', type=float, default=0.0001)\n\n return parser\n\n\nclass Processor():\n def __init__(self, arg, cat):\n self.arg = arg\n self.cat = cat\n self.output_device = self.arg.device[0] if type(self.arg.device) is list else self.arg.device\n self.load_data()\n self.load_model()\n self.load_optimizer()\n\n def load_data(self):\n torch.manual_seed(123)\n Feeder = import_class(self.arg.feeder)\n self.data_loader = dict()\n if self.arg.phase == 'train':\n train_dataset = Feeder(\n root=self.arg.data_path,\n npoints=self.arg.npoints,\n class_choice=self.cat,\n train=True)\n self.data_loader['train'] = torch.utils.data.DataLoader(\n dataset=train_dataset,\n batch_size=self.arg.batch_size,\n shuffle=False,\n num_workers=self.arg.num_workers)\n val_dataset = Feeder(\n root=self.arg.data_path,\n npoints=self.arg.npoints,\n class_choice=self.cat,\n train=False)\n self.val_dataset = val_dataset\n self.data_loader['val'] = torch.utils.data.DataLoader(\n dataset=val_dataset,\n batch_size=self.arg.batch_size,\n shuffle=False,\n num_workers=self.arg.num_workers)\n self.batch_length = math.ceil(len(val_dataset) / self.arg.batch_size)\n self.num_seg_class = val_dataset.num_seg_class\n self.print_log('Load data Completed')\n\n def load_model(self):\n Model = import_class(self.arg.model)\n self.model = Model(self.arg.npoints, self.num_seg_class).cuda(self.output_device)\n\n #if self.arg.weights:\n #TODO\n\n if type(self.arg.device) is list:\n if len(self.arg.device) > 1:\n self.model = nn.DataParallel(\n self.model,\n device_ids=self.arg.device,\n output_device=self.output_device)\n\n if self.arg.phase == \"test\":\n self.model.load_state_dict(torch.load(self.arg.work_dir + '/params.pkl'))\n self.print_log('Load Model Completed')\n\n def load_optimizer(self):\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.arg.base_lr,\n momentum=self.arg.base_momentum,\n nesterov=self.arg.nesterov,\n weight_decay=self.arg.weight_decay)\n\n def print_log(self, str):\n local_time = time.asctime(time.localtime(time.time()))\n str = '[' + local_time + ']' + str\n print(str)\n file = '{}/log.txt'.format(self.arg.work_dir) \n with open(file, 'a') as f:\n print(str, file=f)\n\n def train(self, epoch, save_model=False):\n self.model.train()\n self.print_log('Training epoch{}'.format(epoch + 1))\n loss = nn.CrossEntropyLoss()\n for batch_idx, (data, label) in enumerate(self.data_loader['train']):\n data = data.cuda(self.output_device)\n label = label.cuda(self.output_device)\n pred = self.model(data)\n print(\"pred\")\n pred = pred.view(-1, self.num_seg_class)\n label = label.view(-1)\n cross_loss = loss(pred, label)\n self.optimizer.zero_grad()\n cross_loss.backward()\n print(\"backward\")\n self.optimizer.step()\n print(\"step\")\n if batch_idx % self.arg.log_interval == 0:\n self.print_log('\\tBatch({}/{}). 
Loss:{:.4f} lr:{:.6f}'.format(\n batch_idx, len(self.data_loader['train']),\n cross_loss.item(), self.arg.base_lr))\n if save_model:\n torch.save(self.model.state_dict(), self.arg.work_dir + '/params.pkl')\n\n def eval(self, test=False):\n self.model.eval()\n loss = nn.CrossEntropyLoss()\n mloss = np.zeros(self.batch_length)\n acc = np.zeros(self.batch_length)\n macc = np.zeros((self.batch_length, self.num_seg_class))\n for batch_idx, (data, label) in enumerate(self.data_loader['val']):\n data = data.cuda(self.output_device)\n label = label.cuda(self.output_device)\n pred = self.model(data)\n pred = pred.view(-1, self.num_seg_class)\n label = label.view(-1)\n cross_loss = loss(pred, label)\n mloss[batch_idx] = float(cross_loss.item())\n pred_choice = pred.max(1)[1]\n acc[batch_idx] = np.float(pred_choice.eq(\n label).cpu().sum()) / len(label)\n for cat in range(self.num_seg_class):\n pred_cat = (pred_choice == cat).cpu().numpy()\n label_cat = (label == cat).cpu().numpy()\n label_num = label_cat.sum()\n correct_num = label_cat[np.where(pred_cat > 0)].sum()\n if label_num == 0:\n macc[batch_idx][cat] = -1\n else:\n macc[batch_idx][cat] = correct_num / label_num\n\n mloss = mloss.mean()\n acc = acc.mean()\n macc = macc[np.where(macc > 0)].mean()\n self.print_log(\n 'Evaluation Loss:{:.4f} accuracy:{:.4f} maccuracy:{:.4f}'.format(\n mloss, acc, macc))\n \n if test:\n data, label = self.val_dataset[0]\n print(data)\n data = data.cuda(self.output_device)\n label = label.cuda(self.output_device)\n data.unsqueeze_(0)\n pred = self.model(data)\n pred = pred.max(2)[1]\n print(pred)\n\n data.squeeze_(0), pred.squeeze_(0)\n self.show_image(data, pred, label)\n \n def start(self):\n if self.arg.phase == 'train':\n self.print_log('{}:{}'.format(self.arg.phase, self.cat))\n for epoch in range(self.arg.num_epoch):\n save_model = ((epoch + 1) % self.arg.save_interval == 0) or (\n epoch + 1 == self.arg.num_epoch)\n eval_model = ((epoch + 1) % self.arg.eval_interval == 0) or (\n epoch + 1 == self.arg.num_epoch)\n self.train(epoch, save_model=save_model)\n if eval_model:\n self.eval()\n \n if self.arg.phase == 'test':\n self.print_log('{}:{}'.format(self.arg.phase, self.cat))\n self.eval(test=True)\n \n def show_image(self, pts, pred_seg, seg):\n # print point according to label\n print(pts.size())\n print(pred_seg.size())\n print(seg.size())\n \n pts = pts.cpu().numpy()\n pred_seg = pred_seg.cpu().detach().numpy()\n seg = seg.cpu().numpy()\n\n print(pts.shape)\n print(pred_seg.shape)\n print(seg.shape)\n labels = np.unique(seg)\n ax = plt.subplot(121, projection='3d')\n color = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'violet']\n for i, label in enumerate(labels):\n ps = np.take(pts, np.where(seg == label)[0], axis=0)\n ax.scatter(ps[:, 0], ps[:, 1], ps[:, 2], c=color[i])\n\n labels = np.unique(pred_seg)\n ax = plt.subplot(122, projection='3d')\n color = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'violet']\n for i, label in enumerate(labels):\n ps = np.take(pts, np.where(pred_seg == label)[0], axis=0)\n ax.scatter(ps[:, 0], ps[:, 1], ps[:, 2], c=color[i])\n plt.show()\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n else:\n return False\n\n\ndef import_class(name):\n components = name.split('.')\n mod = __import__(components[0])\n for comp in components[1:]:\n mod = getattr(mod, comp)\n return mod\n\n\ndef work():\n parser = get_parser()\n p = parser.parse_args()\n if p.config is not None:\n with open(p.config, 'r') as f:\n default_arg = yaml.load(f)\n key = vars(p).keys()\n for k in 
default_arg.keys():\n if k not in key:\n print(k)\n print('Wrong arg!')\n parser.set_defaults(**default_arg)\n\n arg = parser.parse_args()\n for k, v in vars(arg).items():\n print('{}: {}'.format(str(k), str(v)))\n for cat in arg.class_choice:\n processor = Processor(arg, cat)\n processor.start()\n print('Completed')\n\n\nif __name__ == '__main__':\n work()","sub_path":"train_val_shapenet.py","file_name":"train_val_shapenet.py","file_ext":"py","file_size_in_byte":10148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"223333061","text":"import argparse\nimport cv2\nimport os\n\nimport torch\nfrom torch.nn import DataParallel\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom datasets.coco import CocoTrainDataset\nfrom datasets.transformationsV3 import ConvertKeypoints, Scale, Rotate, CropPad, CropPad3, Flip\nfrom modules.get_parameters import get_parameters_conv, get_parameters_bn, get_parameters_conv_depthwise\nfrom models.with_mobilenet import PoseEstimationWithMobileNet\nfrom modules.loss import l2_loss\nfrom modules.load_state import load_state, load_from_mobilenet\nfrom val import evaluate\nimport numpy as np\n\ncv2.setNumThreads(0)\ncv2.ocl.setUseOpenCL(False) # To prevent freeze of DataLoader\n\n\ndef test_dataset(prepared_train_labels, train_images_folder, num_refinement_stages, base_lr, batch_size, batches_per_iter,\n num_workers, checkpoint_path, weights_only, from_mobilenet, checkpoints_folder, log_after,\n val_labels, val_images_folder, val_output_name, checkpoint_after, val_after):\n\n stride = 8\n sigma = 7\n path_thickness = 1\n dataset = CocoTrainDataset(prepared_train_labels, train_images_folder,\n stride, sigma, path_thickness,\n transform=transforms.Compose([\n ConvertKeypoints(),\n Scale(),\n Rotate(pad=(128, 128, 128)),\n CropPad3(pad=(128, 128, 128)),\n Flip()]))\n #dataset = CocoTrainDataset(prepared_train_labels, train_images_folder,\n # stride, sigma, path_thickness,\n # transform=transforms.Compose([\n # ConvertKeypoints(),\n # Scale(),\n # Rotate(pad=(128, 128, 128)),\n # CropPad(pad=(128, 128, 128),center_perterb_max=40, crop_x=1920, crop_y=1920),\n # Flip()]))\n #dataset = CocoTrainDataset(prepared_train_labels, train_images_folder,\n # stride, sigma, path_thickness,\n # transform=transforms.Compose([\n # ConvertKeypoints(),\n # CropPad2(pad=(128, 128, 128)),\n # Flip()]))\n\n for i in range (0, 30):\n batch_data = dataset.__getitem__(i)\n\n print('batch data: {}'.format(batch_data))\n\n images = batch_data['image']\n keypoint_masks = batch_data['keypoint_mask']\n paf_masks = batch_data['paf_mask']\n keypoint_maps = batch_data['keypoint_maps']\n paf_maps = batch_data['paf_maps']\n\n print('images shape: {}'.format(images.shape))\n\n images = np.moveaxis(images, [0, 2], [2, 0]) \n #print('image shape: {}'.format(image.shape))\n cv2.imwrite(\"imgage_tmp.jpg\", images * 255)\n #print('keypoint_masks: {}'.format(keypoint_masks.shape))\n #print('keypoint_maps: {}'.format(keypoint_maps.shape))\n #for j in range(0, 19):\n # mask = keypoint_masks[0,j,:,:].cpu().numpy()\n # cv2.imwrite('mask_tmp_'+str(j)+'.jpg', mask * 255) \n #for j in range(0, 19):\n # mask = keypoint_maps[0,j,:,:].cpu().numpy()\n # cv2.imwrite('keypoint_maps_tmp_'+str(j)+'.jpg', mask * 255) \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--prepared-train-labels', type=str, required=True,\n help='path to the file with prepared annotations')\n 
parser.add_argument('--train-images-folder', type=str, required=True, help='path to COCO train images folder')\n parser.add_argument('--num-refinement-stages', type=int, default=1, help='number of refinement stages')\n parser.add_argument('--base-lr', type=float, default=4e-5, help='initial learning rate')\n parser.add_argument('--batch-size', type=int, default=80, help='batch size')\n parser.add_argument('--batches-per-iter', type=int, default=1, help='number of batches to accumulate gradient from')\n parser.add_argument('--num-workers', type=int, default=8, help='number of workers')\n parser.add_argument('--checkpoint-path', type=str, required=True, help='path to the checkpoint to continue training from')\n parser.add_argument('--from-mobilenet', action='store_true',\n help='load weights from mobilenet feature extractor')\n parser.add_argument('--weights-only', action='store_true',\n help='just initialize layers with pre-trained weights and start training from the beginning')\n parser.add_argument('--experiment-name', type=str, default='default',\n help='experiment name to create folder for checkpoints')\n parser.add_argument('--log-after', type=int, default=100, help='number of iterations to print train loss')\n\n parser.add_argument('--val-labels', type=str, required=True, help='path to json with keypoints val labels')\n parser.add_argument('--val-images-folder', type=str, required=True, help='path to COCO val images folder')\n parser.add_argument('--val-output-name', type=str, default='detections.json',\n help='name of output json file with detected keypoints')\n parser.add_argument('--checkpoint-after', type=int, default=5000,\n help='number of iterations to save checkpoint')\n parser.add_argument('--val-after', type=int, default=5000,\n help='number of iterations to run validation')\n args = parser.parse_args()\n\n checkpoints_folder = '{}_checkpoints'.format(args.experiment_name)\n if not os.path.exists(checkpoints_folder):\n os.makedirs(checkpoints_folder)\n\n test_dataset(args.prepared_train_labels, args.train_images_folder, args.num_refinement_stages, args.base_lr, args.batch_size,\n args.batches_per_iter, args.num_workers, args.checkpoint_path, args.weights_only, args.from_mobilenet,\n checkpoints_folder, args.log_after, args.val_labels, args.val_images_folder, args.val_output_name,\n args.checkpoint_after, args.val_after)\n","sub_path":"test-dataset.py","file_name":"test-dataset.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"229220490","text":"from Banco import Banco\nfrom Empregados import Empregados\nfrom datetime import datetime\n\nclass Horista(Empregados):\n\n def __init__(self,id_emp = \"--------\",data_cad = \"---------\",nome =\"--------\",endereco=\"--------\",tipo = \"--------\",pagamento = \"--------\",agenda_emp = \"-----------\",dia = \"------------\",mes= \"-------------\",sindicato = \"------------\",hora =\"------------\" ):\n super().__init__(id_emp=id_emp, data_cad=data_cad, nome=nome, endereco=endereco, tipo=tipo, pagamento=pagamento, agenda_emp=agenda_emp, dia=dia, mes=mes, sindicato=sindicato)\n self.hora = hora \n\n def setcadrasta (self,id_emp):\n self.tipo = \"Horista\"\n self.agenda_emp = \"Semanalmente\"\n super().cadrastra(id_emp)\n Horista.setHora(self)\n Horista.setData(self)\n\n def setData(self):\n self.data_cad = datetime.now()\n self.dia = \"Sexta-feira\"\n\n def modificar_cadrastro(self):\n i=0\n while(i!=1):\n k=int(input(\"Deseja ALTERA 
qual dados do empregado:\\n1-Nome\\n2-Endereço\\n3-Forma de Pagamento\\n4-Sindicato\\n5-Valor hora\\n>>>\"))\n if k == 1:\n super().setNome()\n i=1\n elif k==2:\n super().setEndereco()\n i=1\n elif k==3:\n super().setPagamento()\n i=1\n elif k==4:\n super().setSindicato()\n i=1\n elif k==5:\n Horista.setHora(self)\n i=1\n else:\n print(\"Opção Inválida\")\n\n def setHora(self):\n hora = float(input(\"Digite VALOR/HORA do empregado\\n>>>\"))\n self.hora = hora\n \n def toEmpregado(self):\n super().toEmpregados()\n print(\" Valor/Hora: {}\".format(self.hora))\n\n def receber(self):\n total_geral =0\n super().dados_receber()\n total=self.quant_receber()\n tx_sind = super().desconto()\n total_geral = total - tx_sind\n print(\"Total a receber: {:.2f}\".format(total_geral))\n\n def quant_receber(self):\n total = 0\n for i in self.pontos:\n hora = i.getPonto()\n if hora > 8 :\n extra = (hora - 8)*self.hora*1.5\n valor = 8*self.hora\n else:\n valor = hora*self.hora\n extra = 0\n total = total + valor + extra\n print(\"Total por Horas trabalhadas: {:.2f}\".format(total))\n return total\n\n ","sub_path":"Horista.py","file_name":"Horista.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"259259066","text":"\"\"\" Data for training and testing \"\"\"\nfrom __future__ import division\nimport random\nimport glob\nimport scipy.io as sio\nimport numpy as np\n\n# The size of our audio sample\nINTERVAL_SIZE = 1024\nFREQUENCY = 16000\n\ndef init_dict():\n \"\"\" Dictionary of monophones \"\"\"\n with open(\"monophones\") as mono_file:\n counter = 0\n c = {}\n for line in mono_file:\n line = line.rstrip('\\n')\n c[line] = counter\n counter = counter + 1\n return c\n\ndef get_alt_dict():\n \"\"\" Alternative dictionary of monophones only 39 \"\"\"\n alt_dict = {'EH2': 10, 'K': 19,\n 'S': 28, 'L': 20, 'M': 21,\n 'SH': 29, 'N': 22, 'P': 26,\n 'OY0': 25, 'OY2': 25, 'OY1': 25, 'OW2': 24,\n 'T': 30, 'OW1': 24, 'EY0': 12, 'EY1': 12, 'EY2': 12,\n 'AW2': 4, 'AW1': 4, 'AW0': 4,\n 'br': 39, 'cg': 39, 'lg': 39, 'ls': 39, 'ns': 39, 'sil': 39, 'sp': 39,\n 'Z': 37, 'W': 35, 'D': 8, 'AH0': 2, 'AH1': 2, 'AH2': 2,\n 'B': 6, 'EH1': 10, 'EH0': 10, 'V': 34,\n 'IH1': 16, 'IH0': 16, 'IH2': 16,\n 'IY2': 17, 'IY1': 17, 'IY0': 17,\n 'R': 27, 'AY1': 5, 'ER0': 11,\n 'AE1': 1, 'AE2': 1, 'AO1': 3, 'AO2': 3,\n 'NG': 23, 'AA0': 0, 'AA2': 0, 'AA1': 0,\n 'G': 14, 'TH': 31,\n 'F': 13, 'DH': 9, 'HH': 15,\n 'UH1': 32, 'UH2': 32, 'UH0': 32,\n 'CH': 7, 'UW1': 33, 'UW0': 33, 'UW2': 33,\n 'OW0': 24, 'AE0': 1, 'AO0': 3, 'JH': 18,\n 'Y': 36, 'ZH': 38, 'AY2': 5, 'ER1': 11,\n 'AY0': 5, 'ER2': 11}\n return alt_dict\n\ndef replaceZeroes(data):\n min_nonzero = np.min(data[np.nonzero(data)])\n data[data == 0] = min_nonzero\n return data\n\ndef split_data():\n \"\"\" Split data so that training and testing do not overlap \"\"\"\n # path = \"../../clean_data/mat_normalized/*.mat\"\n path = \"mat_normalized/*.mat\"\n list_files = glob.glob(path)\n length = len(list_files)\n train_files = list_files[:length-100]\n test_files = list_files[length-100:length]\n\n return train_files, test_files\n\ndef next_train_batch(batch_size):\n \"\"\" Get next training batch \"\"\"\n data = np.zeros((batch_size, INTERVAL_SIZE))\n labels = np.zeros(batch_size)\n m_dict = init_dict()\n train, _ = split_data()\n for i in range(batch_size):\n # Randomly chooses a file from the training data\n fname = random.choice(train)\n # Loads the contents of the .mat file\n mat_contents = 
sio.loadmat(fname)\n # Choose random audio interval from the given .mat file\n aud_length = len(mat_contents['aud'])\n start_index = random.randint(0, aud_length - INTERVAL_SIZE)\n wave_form = mat_contents['aud'][start_index:start_index+INTERVAL_SIZE]\n data[i, :] = np.ravel(wave_form)\n # aud_length = len(mat_contents['aud'])\n # start_index = random.randint(0, aud_length - INTERVAL_SIZE)\n # input_fourier = mat_contents['aud'][start_index:start_index+INTERVAL_SIZE]\n # output_fourier = np.fft.rfft(input_fourier)\n # log_base_10 = np.log10(replaceZeroes(output_fourier))\n # abs_value = np.absolute(log_base_10)\n # data[i, :] = np.ravel(abs_value)\n # Find corresponding label\n time = (start_index + INTERVAL_SIZE) / (2* FREQUENCY)\n index = binary_search(mat_contents['intervals'], time)\n key = mat_contents['phonemes'][0, index][0]\n labels[i] = m_dict.get(key)\n\n return data, labels\n\ndef next_test_batch(batch_size):\n \"\"\" Get next testing batch \"\"\"\n data = np.zeros((batch_size, INTERVAL_SIZE))\n labels = np.zeros(batch_size)\n m_dict = init_dict()\n _, test = split_data()\n for i in range(batch_size):\n # Randomly chooses a file from the testing data\n fname = random.choice(test)\n # Loads the contents of the .mat file\n mat_contents = sio.loadmat(fname)\n # Choose random audio interval from the given .mat file\n aud_length = len(mat_contents['aud'])\n start_index = random.randint(0, aud_length - INTERVAL_SIZE)\n wave_form = mat_contents['aud'][start_index:start_index+INTERVAL_SIZE]\n data[i, :] = np.ravel(wave_form)\n # Find corresponding label\n time = (start_index + INTERVAL_SIZE) / (2* FREQUENCY)\n index = binary_search(mat_contents['intervals'], time)\n key = mat_contents['phonemes'][0, index][0]\n labels[i] = m_dict.get(key)\n return data, labels\n\ndef binary_search(intervals, time):\n \"\"\" Binary search for the interval index the time falls into \"\"\"\n length = len(intervals[0])\n lo = 0\n hi = length - 1\n\n while lo <= hi:\n mid = (lo + hi) // 2\n # print('this is an iteration')\n # print(lo)\n # print(mid)\n # print(hi)\n # print(intervals[0,mid])\n if intervals[0, mid] < time:\n if intervals[1, mid] >= time:\n break\n else:\n lo = mid + 1\n else:\n hi = mid - 1\n return mid","sub_path":"data_batch.py","file_name":"data_batch.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"420593202","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n# Reference: https://scikit-learn.org/stable/modules/svm.html#svm-classification\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn import svm\n\n\ndef main():\n x = np.array([[-3.0, -2.9], [0.5, 8.7], [2.9, 2.1],[-0.1, 5.2],[-4.0, 2.2],\n [-1.3, 3.7], [-3.4, 6.2], [-4.1, 3.4], [-5.1, 1.6], [1.9, 5.1], #--#\n [-2.0, -8.4], [-8.9, 0.2], [-4.2, -7.7], [-8.5, -3.2], [-6.7, -4.0],\n [-0.5, 9.2], [-5.3, 6.7], [-8.7, -6.4], [-7.1, -9.7], [-8.0, -6.3]])\n y = np.array([0]*10 + [1]*10)\n\n #fit the model\n clf = svm.SVC(kernel='poly', degree=2, coef0=1.0)\n for num in range(1,11): # repeat\n print(num)\n x_train = np.vstack([x[0:num], x[10:(10+num)]])\n y_train = np.hstack([y[0:num], y[10:(10+num)]])\n clf.fit(x_train, y_train)\n\n # create a mesh to plot\n x_min, x_max = x_train[:, 0].min() - 1, x_train[:, 0].max() + 1\n y_min, y_max = x_train[:, 1].min() - 1, x_train[:, 1].max() + 1\n h = 0.02\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, 
h))\n\n # plot the line, the points, and the nearest vectors to the plane\n plt.clf()\n plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,\n zorder=10, facecolors='none', edgecolors='k') # support_vectors\n plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, zorder=10, s=20, edgecolors='k') # train data\n\n z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) # decision function\n margin = clf.decision_function(clf.support_vectors_)\n print(margin)\n\n # Put the result into a color plot\n z = z.reshape(xx.shape)\n colours = ([\"bisque\", \"lightskyblue\"])\n cmap = ListedColormap(colours)\n plt.pcolormesh(xx, yy, z > 0, cmap=cmap)\n plt.contour(xx, yy, z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5])\n\n plt.xlim(x_min, x_max)\n plt.ylim(y_min, y_max)\n\n plt.xticks(())\n plt.yticks(())\n plt.show()\n\nif __name__ == \"__main__\":\n main()","sub_path":"5SVM/code/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"501198340","text":"\n\nimport re\n\nfrom ..tokenization import FullTokenizer\n\nfrom ..utils import (\n get_or_make_label_encoder, BOS_TOKEN, EOS_TOKEN)\nfrom ..create_generators import create_pretraining_generator, create_single_problem_generator\n\nfrom .ner_data import gold_horse_ent_type_process_fn, read_ner_data\nfrom .preproc_decorator import preprocessing_fn\n\n\n@preprocessing_fn\ndef weibo_fake_cls(params, mode):\n \"\"\"Just a test problem to test multiproblem support\n\n Arguments:\n params {Params} -- params\n mode {mode} -- mode\n \"\"\"\n data = read_ner_data(file_pattern='data/ner/weiboNER*',\n proc_fn=gold_horse_ent_type_process_fn)\n if mode == 'train':\n data = data['train']\n else:\n data = data['eval']\n inputs_list = data['inputs'][:100]\n target_list = data['target'][:100]\n\n new_target_list = ['1' if len(set(t)) > 1 else '0' for t in target_list]\n\n return inputs_list, new_target_list\n\n\n@preprocessing_fn\ndef weibo_fake_seq2seq_tag(params, mode: str):\n\n data = read_ner_data(file_pattern='data/ner/weiboNER*',\n proc_fn=gold_horse_ent_type_process_fn)\n if mode == 'train':\n data = data['train']\n else:\n data = data['eval']\n inputs_list = data['inputs'][:100]\n target_list = data['target'][:100]\n new_target_list = [['1', '2'] for t in target_list]\n return inputs_list, new_target_list\n\n\ndef weibo_pretrain(params, mode):\n\n sentence_split = r'[.!?。?!]'\n\n tokenizer = FullTokenizer(vocab_file=params.vocab_file)\n data = read_ner_data(file_pattern='data/ner/weiboNER*',\n proc_fn=gold_horse_segment_process_fn)\n if mode == 'train':\n data = data['train']\n else:\n data = data['eval']\n inputs_list = data['inputs']\n\n segmented_list = []\n for document in inputs_list:\n segmented_list.append([])\n doc_string = ''.join(document)\n splited_doc = re.split(sentence_split, doc_string)\n for sentence in splited_doc:\n if sentence:\n segmented_list[-1].append(list(sentence))\n segmented_list = [doc for doc in segmented_list if doc]\n\n return create_pretraining_generator('weibo_pretrain',\n segmented_list,\n None,\n None,\n params,\n tokenizer,\n mode)\n\n\n@preprocessing_fn\ndef weibo_fake_seq_tag(params, mode):\n data = read_ner_data(file_pattern='data/ner/weiboNER*',\n proc_fn=gold_horse_ent_type_process_fn)\n if mode == 'train':\n data = data['train']\n else:\n data = data['eval']\n inputs_list = data['inputs'][:100]\n target_list = data['target'][:100]\n\n return 
inputs_list, target_list\n","sub_path":"bert_multitask_learning/data_preprocessing/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"578984202","text":"from 臺灣言語工具.音標系統.閩南語.臺灣閩南語羅馬字拼音 import 臺灣閩南語羅馬字拼音\nfrom 臺灣言語工具.解析整理.拆文分析器 import 拆文分析器\nfrom 臺灣言語服務.kaldi.lexicon import 辭典輸出\nfrom csv import DictReader\nfrom sys import argv\nfrom 臺灣言語服務.Kaldi語料匯出 import Kaldi語料匯出\n\nimport os\nfrom os.path import join\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\n\nfrom 臺灣言語服務.Kaldi語料匯出 import Kaldi語料匯出\nfrom 臺灣言語服務.kaldi.lexicon import 辭典輸出\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n 'csv',\n type=str,\n )\n# parser.add_argument(\n# '辭典輸出函式',\n# type=str,\n# choices=[\n# mia\n# for mia in dir(辭典輸出)\n# if (\n# callable(getattr(辭典輸出, mia)) and\n# not mia.startswith(\"_\") and\n# mia != '漢字聲韻'\n# )\n# ],\n# help='選擇lexicon佮聲學單位格式'\n# )\n# parser.add_argument(\n# '語言文本',\n# type=str,\n# help='選擇語料的語言文本,產生lexicon辭典'\n# )\n parser.add_argument(\n '匯出路徑',\n type=str,\n help='kaldi的egs內底的s5資料夾'\n )\n\n def handle(self, *args, **參數):\n 辭典輸出物件 = 辭典輸出(臺灣閩南語羅馬字拼音, '拆做聲韻莫調')\n lexicon = Kaldi語料匯出.初使化辭典資料()['全部詞']\n 路 = set()\n 終點 = set()\n with open(參數['csv']) as tong:\n for kui, ku in enumerate(DictReader(tong), start=1):\n lmj = ku['Lô-má-jī'].strip()\n hj = ku['Hàn-jī'].strip()\n if lmj:\n pianho = '{}|{}|{}'.format(kui, hj, lmj).replace(' ', '_')\n 句物件 = 拆文分析器.建立句物件(lmj)\n 辭典格式, *_ = self.音節轉辭典格式(句物件, 辭典輸出物件, 詞條=pianho)\n lexicon.add(辭典格式)\n 路.add(\n '{0}\\t{1}\\t{2}\\t{2}'.format(\n 0, kui, pianho,\n )\n )\n for khioh in range(3):\n tsing = kui - khioh - 1\n if tsing > 0:\n 路.add(\n '{0}\\t{1}\\t{2}\\t{2}'.format(\n tsing, kui, pianho,\n )\n )\n 終點.add('{}\\t1'.format(kui))\n with open(join(參數['匯出路徑'], 'lexicon.txt'), 'w') as tong:\n print('\\n'.join(sorted(lexicon)), file=tong)\n with open(join(參數['匯出路徑'], 'ku.fst'), 'w') as tong:\n print('\\n'.join(sorted(路)), file=tong)\n print('\\n'.join(sorted(終點)), file=tong)\n\n @classmethod\n def 音節轉辭典格式(cls, 物件, 辭典輸出物件, 詞條=None):\n 新聲類 = set()\n 新韻類 = set()\n 新調類 = set()\n 聲韻陣列 = []\n for 字物件 in 物件.篩出字物件():\n if 字物件.敢是標點符號():\n continue\n try:\n 聲類, 韻類, 調類 = 辭典輸出物件.輸出函式(字物件)\n for 聲 in 聲類:\n 聲韻陣列.append(聲)\n 新聲類.add(聲)\n for 韻 in 韻類:\n 聲韻陣列.append(韻[1])\n 新韻類.add(韻)\n 新調類 |= 調類\n except ValueError:\n 聲韻陣列.append('SPN')\n if 詞條:\n 分詞 = ''.join(詞條.split())\n else:\n 分詞 = 物件.看分詞()\n 辭典格式 = '{}\\t{}'.format(分詞, ' '.join(聲韻陣列))\n return 辭典格式, 新聲類, 新韻類, 新調類\n","sub_path":"it_kaldi_format/sansing_text.py","file_name":"sansing_text.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"410156878","text":"#Lock User Analyzer\n\ndef CheckSequence(events):\n if events is None or len(events)==0:\n return 0\n sets = set()\n stack = []\n for i in range(len(events)):\n cur = events[i].split(\" \")\n if cur[0] == \"ACQUIRE\":\n if cur[1] in sets:\n return i+1\n sets.add(cur[1])\n stack.append(cur[1])\n else:\n if len(stack)!=0 and stack[len(stack)-1]==cur[1]:\n sets.remove(stack.pop())\n else:\n return i+1\n if len(stack)!=0:\n return len(events)+1\n return 0\n\nif __name__ ==\"__main__\":\n #events = [\"ACQUIRE 364\",\"ACQUIRE 84\",\"RELEASE 84\",\"ACQUIRE 1337\",\"RELEASE 1337\",\"RELEASE 364\"]\n events = [\"ACQUIRE 364\",\"ACQUIRE 84\",\"RELEASE 
364\",\"RELEASE 84\"]\n #events = [\"ACQUIRE 364\",\"ACQUIRE 84\",\"ACQUIRE 84\"]\n count = CheckSequence(events)\n print(count)\n","sub_path":"Lock_user_analyzer.py","file_name":"Lock_user_analyzer.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"510430024","text":"import csv\nimport datetime\nimport requests\nimport xlsxwriter\n\n\nclass ReportingAPI:\n \"\"\"\n http://developers.giosg.com/reporting_http_api.html\n \"\"\"\n\n API_URL = 'https://api.giosg.com/api/reporting/v1'\n DAYS_URL = '/rooms/{}/chat-stats/daily/'\n HOURS_URL = '/rooms/{}/user-presence-counts/'\n\n ROOM = '84e0fefa-5675-11e7-a349-00163efdd8db'\n NUMBER_OF_DAYS = 3\n\n FIELDNAMES = ['date', 'hour_of_day', 'user_count']\n\n def __init__(self, token: str):\n self.session = requests.Session()\n self.session.headers.update({\n 'Authorization': 'Token {}'.format(token)\n })\n\n def _get_days(self, start_date, end_date):\n url = (self.API_URL + self.DAYS_URL).format(self.ROOM)\n response = self.session.get(url, params={\n 'start_date': start_date,\n 'end_date': end_date,\n })\n response.raise_for_status()\n return response.json()\n\n def _get_hours(self, date):\n url = (self.API_URL + self.HOURS_URL).format(self.ROOM)\n response = self.session.get(url, params={\n 'start_date': date,\n 'end_date': date, # getting data for one day\n })\n response.raise_for_status()\n return response.json()\n\n def get_busy_hours(self, start_date: datetime.date,\n end_date: datetime.date):\n days = self._get_days(start_date.strftime('%Y-%m-%d'),\n end_date.strftime('%Y-%m-%d'))\n\n days = sorted(\n days['by_date'],\n key=lambda date: date['conversation_count'],\n reverse=True,\n )[:self.NUMBER_OF_DAYS]\n\n result = []\n for day in days:\n hours = self._get_hours(day['date'])\n result.append({\n 'date': day['date'],\n 'conversation_count': day['conversation_count'],\n 'hours': hours['hourly'],\n })\n\n return result\n\n @staticmethod\n def show_busy_hours(busy_hours: list):\n for day in busy_hours:\n print('On {} there were {} chats'.format(\n day['date'], day['conversation_count']))\n print('-----------------')\n\n for hour in day['hours']:\n print('{}:00 there was {} users present'.format(\n hour['hour_of_day'], hour['user_count']))\n\n print()\n\n @classmethod\n def save_to_csv(cls, busy_hours: list, filename: str):\n with open(filename, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=cls.FIELDNAMES)\n writer.writeheader()\n\n for day in busy_hours:\n for hour in day['hours']:\n writer.writerow(hour)\n\n @classmethod\n def save_to_excel(cls, busy_hours: list, filename: str):\n workbook = xlsxwriter.Workbook(filename)\n worksheet = workbook.add_worksheet()\n\n # write header\n for col, fieldname in enumerate(cls.FIELDNAMES):\n worksheet.write(0, col, fieldname)\n\n # write data\n row = 1\n for day in busy_hours:\n for hour in day['hours']:\n for col, fieldname in enumerate(cls.FIELDNAMES):\n worksheet.write(row, col, hour[fieldname])\n row += 1\n\n # draw the chart\n chart = workbook.add_chart({'type': 'column'})\n chart.set_x_axis({'name': 'Hour', 'min': 0})\n chart.set_y_axis({'name': 'Conversations'})\n\n for i, day in enumerate(busy_hours):\n chart.add_series({\n 'values': '=Sheet1!$C${}:$C${}'.format(i*24+2, i*24+25),\n 'name': day['date'],\n })\n\n worksheet.insert_chart('E1', chart)\n\n 
workbook.close()\n","sub_path":"reporting_api.py","file_name":"reporting_api.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"194810831","text":"import sys\nimport fileinput\nfrom math import ceil\nfrom time import sleep\n\ndef get_orp(word_length):\n percent = 0.35\n orp = int(ceil(word_length * percent))\n return orp if orp <= 5 else 5\n\ndef get_longest(text):\n return len(sorted(text, key=len, reverse=True)[0])\n \ndef get_spaces(word, max_length):\n max_orp = get_orp(max_length)\n if len(word) < 3:\n orp = len(word)\n else:\n orp = get_orp(len(word))\n prefix_space = (max_orp - orp)\n postfix_space = (max_length - len(word) - prefix_space)\n return (orp, prefix_space, postfix_space)\n\ndef color_orp_char(word, orp):\n RED = '\\x1b[91m'\n NORMAL = '\\x1b[0m'\n chars = list(word)\n chars.insert(orp, RED)\n chars.insert((orp + 2), NORMAL)\n return \"\".join(chars)\n\ndef print_word(word, orp_config):\n orp, prefix_space, postfix_space = orp_config\n orp = orp - 1\n print_string = (\" \" * prefix_space) + color_orp_char(word, orp) + (\" \" * postfix_space)\n print(\"{}\".format(print_string), flush=True, end='\\r')\n\ndef clean_articles(text):\n remove = (',', '.', '!', '?', '-', ';')\n for char in remove:\n text = text.replace(char, \" \")\n text = text.strip()\n #text = text.replace(\"\\n\", \" \")\n return text.split()\n\ndef spritz(wpm, text):\n sleep_interval = (60.0 / wpm)\n text = clean_articles(text)\n max_length = get_longest(text)\n\n for word in text:\n if word == \"\":\n sleep(sleep_interval * 3)\n continue\n word_sleep_interval = 0.01 * len(word)\n sleep(sleep_interval + word_sleep_interval)\n orp_config = get_spaces(word, max_length)\n print_word(word, orp_config)\n\ndef main():\n wpm = int(sys.argv[1])\n text = \"\"\n for line in fileinput.input(sys.argv[2:]):\n text += line\n\n spritz(wpm, text)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"clspritz.py","file_name":"clspritz.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"141019997","text":"#!/usr/bin/env python3\n\n#------------------------------------------------------------------------------#\n# wasp-mapping.py #\n#------------------------------------------------------------------------------#\n\n# Implementation of the re-mapping procedure to eliminate reference bias\n# detailed in WASP.\n\n\n\n\n#--------------------------------- Imports ------------------------------------#\n\n# Import some necessary modules. We'll use subprocess to call things like bwa,\n# anaconda-associated python, etc., Pool from multiprocessing to run\n# certain things in parallel, and argparse to parse arguments.\n\nimport subprocess\nfrom multiprocessing import Pool\nimport argparse\n\n\n\n\n#--------------------------- Low-level functions ------------------------------#\n\n# Run bwa aln with options for multithreading and read trimming. \ndef bwa_aln(args, read_trimming, fastq, sai):\n with open(sai, 'w') as f:\n subprocess.call(\n [\n 'bwa',\n 'aln', \n '-t', str(args.max_processes),\n '-q', str(read_trimming),\n args.reference_genome,\n fastq\n ],\n stdout=f\n )\n\n\n\n\n# Run bwa samse\ndef bwa_samse(args, fastq, sai, sam):\n with open(sam, 'w') as f:\n subprocess.call(\n [\n 'bwa',\n 'samse',\n args.reference_genome,\n sai,\n fastq\n ],\n stdout=f\n )\n\n\n\n\n# Run bwa sampe. 
The -P setting increases speed but requires 4-5 GB of memory\ndef bwa_sampe(args, fastq1, sai1, fastq2, sai2, sam):\n with open(sam, 'w') as f:\n subprocess.call(\n [\n 'bwa',\n 'sampe',\n '-P',\n args.reference_genome,\n sai1,\n sai2,\n fastq1,\n fastq2\n ],\n stdout=f\n )\n\n\n\n\n# run STAR\ndef star(args, fastq_list, prefix):\n subprocess.call(\n [\n 'STAR',\n '--runThreadN', str(args.max_processes),\n '--genomeDir', args.STAR_genomeDir,\n '--readFilesIn', fastq_list[0],\n ] + [\n fastq_list[-1]\n ] * (\n args.paired_end\n ) + [\n '--readFilesCommand', 'zcat',\n \n '--twopassMode', 'Basic',\n \n '--sjdbGTFfile', args.STAR_sjdbGTFfile,\n \n '--outFilterType', 'BySJout',\n '--outFilterMultimapNmax', '20',\n '--alignSJoverhangMin', '8',\n '--alignSJDBoverhangMin', '1',\n '--outFilterMismatchNmax', '999',\n '--alignIntronMin', '20',\n '--alignIntronMax', '1000000',\n '--alignMatesGapMax', '1000000',\n \n '--outSAMmapqUnique', '50',\n '--outMultimapperOrder', 'Random',\n '--outSAMmultNmax', '1',\n \n '--outSAMattributes', 'NH', 'HI', 'AS', 'nM', 'MD',\n \n '--outSAMunmapped', 'Within',\n '--outFilterMismatchNoverLmax', '0.04',\n '--sjdbScore', '1',\n '--genomeLoad', 'NoSharedMemory',\n '--outSAMheaderHD', '@HD VN:1.4 SO:unsorted',\n \n '--outFileNamePrefix', prefix\n ]\n )\n\n\n\n\n\n# Run samtools view with BAM output and options for base quality filtering and\n# multithreading\ndef samtools_view(args, sam, bam):\n with open(bam, 'w') as f:\n subprocess.call(\n [\n 'samtools',\n 'view',\n '-Sbq', '30',\n '-@', str(args.max_processes),\n sam\n ],\n stdout=f\n )\n\n\n\n\n# Remove supplementary alignments from a BAM file\ndef remove_supplementary(args, bam, nosupbam):\n with open(nosupbam, 'w') as f:\n subprocess.call(\n [\n 'samtools',\n 'view',\n '-b',\n '-F', '0x800',\n '-@', str(args.max_processes),\n bam\n ],\n stdout=f\n )\n\n\n\n\n# Run samtools sort with multithreading options\ndef samtools_sort(args, bam, sortedbam):\n subprocess.call(\n [\n 'samtools',\n 'sort',\n '-m', ''.join((str(int(1024 / 47 * args.memory_limit)), 'M')),\n '-@', str(args.max_processes),\n '-o', sortedbam,\n bam\n ]\n )\n\n\n\n\n# Run samtools index\ndef samtools_index(bam):\n subprocess.call(['samtools', 'index', bam])\n\n\n\n\n#--------------------------- Mid-level functions ------------------------------#\n\n# Initial bwa aln run - for aligning raw reads\ndef initial_bwa_aln(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n if args.paired_end:\n bwa_aln(\n args,\n 15, \n ''.join([prefix, '_1.fastq']),\n ''.join([prefix, '_1.sai'])\n )\n bwa_aln(\n args,\n 15, \n ''.join([prefix, '_2.fastq']),\n ''.join([prefix, '_2.sai'])\n )\n else:\n bwa_aln(\n args,\n 15, \n ''.join([prefix, '.fastq']),\n ''.join([prefix, '.sai'])\n )\n\n\n\n\n# Initial SAM creation - for raw read alignment\ndef initial_bwa_sam(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n if args.paired_end:\n bwa_sampe(\n args,\n ''.join([prefix, '_1.fastq']),\n ''.join([prefix, '_1.sai']),\n ''.join([prefix, '_2.fastq']),\n ''.join([prefix, '_2.sai']),\n ''.join([prefix, '.sam'])\n )\n subprocess.call(\n [\n 'rm',\n ''.join([prefix, '_1.sai']),\n ''.join([prefix, '_2.sai'])\n ]\n )\n else:\n bwa_samse(\n args,\n ''.join([prefix, '.fastq']),\n ''.join([prefix, '.sai']),\n ''.join([prefix, '.sam'])\n )\n subprocess.call(['rm', ''.join([prefix, '.sai'])])\n \n\n\n\n# Initial STAR run - for aligning raw RNA-seq reads\ndef initial_star(args, lane_name):\n prefix = 
'/'.join([args.operating_directory, lane_name, lane_name])\n if args.paired_end:\n star(\n args,\n [\n ''.join([prefix, '_1.fastq']),\n ''.join([prefix, '_2.fastq'])\n ],\n prefix\n )\n else:\n star(\n args,\n [\n ''.join([prefix, '.fastq'])\n ],\n prefix\n )\n\n\n\n\n# Perform the initial sort and index\ndef initial_sort_and_index(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n if args.bam_start:\n samtools_view(\n args, \n ''.join([prefix, '.bam']),\n ''.join([prefix, '.filt.bam'])\n )\n samtools_sort(\n args, \n ''.join([prefix, '.filt.bam']),\n ''.join([prefix, '.sort.bam'])\n )\n subprocess.call(['rm', ''.join([prefix, '.filt.bam'])])\n else:\n if not args.rna_seq:\n samtools_view(\n args, \n ''.join([prefix, '.sam']),\n ''.join([prefix, '.bam'])\n )\n else:\n samtools_view(\n args, \n ''.join([prefix, 'Aligned.out.sam']),\n ''.join([prefix, '.bam'])\n )\n samtools_sort(\n args, \n ''.join([prefix, '.bam']),\n ''.join([prefix, '.sort.bam'])\n )\n subprocess.call(\n [\n 'rm',\n ''.join([prefix, '.bam']),\n ''.join([prefix, '.sam'])\n ]\n )\n samtools_index(''.join([prefix, '.sort.bam']))\n\n\n\n\n# Find intersecting SNPs\ndef find_intersecting_snps(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n subprocess.call(\n [\n args.anaconda_path,\n ''.join(\n [\n args.wasp_directory,\n '/mapping/find_intersecting_snps.py'\n ]\n ),\n '--is_sorted',\n '--output_dir', ''.join(\n [\n args.operating_directory,\n '/',\n lane_name\n ]\n ),\n ''.join([prefix, '.sort.bam'])\n ] + [\n '--snp_tab', ''.join([args.hdf5_directory, '/snp_tab.h5']),\n '--snp_index', ''.join([args.hdf5_directory, '/snp_index.h5']),\n '--haplotype', ''.join([args.hdf5_directory, '/haplotypes.h5'])\n ] * (1-args.text_based_snp_files) + [\n '--snp_dir', args.hdf5_directory\n ] * args.text_based_snp_files + [\n '--is_paired_end'\n ] * args.paired_end\n )\n subprocess.call(\n [\n 'rm',\n ''.join([prefix, '.sort.bam']),\n ''.join([prefix, '.sort.bam.bai'])\n ]\n )\n\n\n\n\n# Repeat bwa aln for reads overlapping snps\ndef re_bwa_aln(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n if args.paired_end:\n bwa_aln(\n args,\n 0, \n ''.join([prefix, '.sort.remap.fq1.gz']),\n ''.join([prefix, '.remap1.sai'])\n )\n bwa_aln(\n args,\n 0, \n ''.join([prefix, '.sort.remap.fq2.gz']),\n ''.join([prefix, '.remap2.sai'])\n )\n else:\n bwa_aln(\n args,\n 0, \n ''.join([prefix, '.sort.remap.fq.gz']),\n ''.join([prefix, '.remap.sai'])\n )\n\n\n\n\n# Repeat bwa sam for reads overlapping snps\ndef re_bwa_sam(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n if args.paired_end:\n bwa_sampe(\n args,\n ''.join([prefix, '.sort.remap.fq1.gz']),\n ''.join([prefix, '.remap1.sai']),\n ''.join([prefix, '.sort.remap.fq2.gz']),\n ''.join([prefix, '.remap2.sai']),\n ''.join([prefix, '.remap.sam'])\n )\n else:\n bwa_samse(\n args,\n ''.join([prefix, '.sort.remap.fq.gz']),\n ''.join([prefix, '.remap.sai']),\n ''.join([prefix, '.remap.sam'])\n )\n\n\n\n\n# Repeat STAR for reads overlapping snps\ndef re_star(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n if args.paired_end:\n star(\n args,\n [\n ''.join([prefix, '.sort.remap.fq1.gz']),\n ''.join([prefix, '.sort.remap.fq2.gz'])\n ],\n ''.join([prefix, '.remap'])\n )\n else:\n star(\n args,\n [\n ''.join([prefix, '.sort.remap.fq.gz'])\n ],\n ''.join([prefix, '.remap'])\n )\n\n\n\n\n# Remove intermediate files to avoid 
clutter\ndef clean_up_fq_sai(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n if args.paired_end:\n subprocess.call(\n [\n 'rm',\n ''.join([prefix, '.sort.remap.fq1.gz']),\n ''.join([prefix, '.sort.remap.fq2.gz']),\n ''.join([prefix, '.sort.remap.single.fq.gz'])\n ] + [\n ''.join([prefix, '.remap1.sai']),\n ''.join([prefix, '.remap2.sai'])\n ] * (\n 1 - args.rna_seq\n )\n )\n else:\n subprocess.call(\n [\n 'rm',\n ''.join([prefix, '.sort.remap.fq.gz'])\n ] + [\n ''.join([prefix, '.remap.sai'])\n ] * (\n 1 - args.rna_seq\n )\n )\n\n\n\n\n# Convert remapped sam to bam, sort, and index\ndef re_sort_and_index(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n if not args.rna_seq:\n samtools_view(\n args, \n ''.join([prefix, '.remap.sam']),\n ''.join([prefix, '.remap.bam'])\n )\n subprocess.call(['rm', ''.join([prefix, '.remap.sam'])])\n else:\n samtools_view(\n args, \n ''.join([prefix, '.remapAligned.out.sam']),\n ''.join([prefix, '.remap.bam'])\n )\n subprocess.call(['rm', ''.join([prefix, '.remapAligned.out.sam'])])\n samtools_sort(\n args, \n ''.join([prefix, '.remap.bam']),\n ''.join([prefix, '.remap.sort.bam'])\n )\n samtools_index(''.join([prefix, '.remap.sort.bam']))\n subprocess.call(['rm', ''.join([prefix, '.remap.bam'])])\n\n\n\n\n# Filter the remapped reads\ndef filter_remapped_reads_and_merge(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n subprocess.call(\n [\n args.anaconda_path,\n ''.join([args.wasp_directory, '/mapping/filter_remapped_reads.py']),\n ''.join([prefix, '.sort.to.remap.bam']),\n ''.join([prefix, '.remap.sort.bam']),\n ''.join([prefix, '.remap.keep.bam'])\n ]\n )\n subprocess.call(\n [\n 'samtools',\n 'merge',\n ''.join([prefix, '.remap.keep.merge.bam']),\n ''.join([prefix, '.sort.keep.bam']),\n ''.join([prefix, '.remap.keep.bam'])\n ]\n )\n subprocess.call(\n [\n 'rm',\n ''.join([prefix, '.sort.keep.bam']),\n ''.join([prefix, '.remap.keep.bam']),\n ''.join([prefix, '.sort.to.remap.bam']),\n ''.join([prefix, '.remap.sort.bam']),\n ''.join([prefix, '.remap.sort.bam.bai'])\n ]\n )\n\n\n\n\n# Perform the third sort and index\ndef sort_and_index_filtered(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n if args.paired_end:\n samtools_sort(args, \n ''.join([prefix, '.remap.keep.merge.bam']),\n ''.join([prefix, '.remap.keep.merge.sort.sup.bam'])\n )\n samtools_index(''.join([prefix, '.remap.keep.merge.sort.sup.bam']))\n remove_supplementary(args, \n ''.join([prefix, '.remap.keep.merge.sort.sup.bam']),\n ''.join([prefix, '.remap.keep.merge.sort.bam'])\n )\n subprocess.call(\n [\n 'rm',\n ''.join([prefix, '.remap.keep.merge.sort.sup.bam']),\n ''.join([prefix, '.remap.keep.merge.sort.sup.bam.bai'])\n ]\n )\n else:\n samtools_sort(args, \n ''.join([prefix, '.remap.keep.merge.bam']),\n ''.join([prefix, '.remap.keep.merge.sort.bam'])\n )\n subprocess.call(['rm', ''.join([prefix, '.remap.keep.merge.bam'])])\n samtools_index(''.join([prefix, '.remap.keep.merge.sort.bam']))\n\n\n\n\n# Remove duplicate reads (avoiding bias)\ndef remove_duplicates(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n subprocess.call(\n [\n args.anaconda_path,\n ''.join(\n [\n args.wasp_directory\n ] + [\n '/mapping/rmdup.py'\n ] * (\n 1 - args.paired_end\n ) + [\n '/mapping/rmdup_pe.py'\n ] * (\n args.paired_end\n )\n ),\n ''.join([prefix, '.remap.keep.merge.sort.bam']),\n ''.join([prefix, 
'.remap.keep.merge.rmdup.bam'])\n ]\n )\n subprocess.call(\n [\n 'rm',\n ''.join([prefix, '.remap.keep.merge.sort.bam']),\n ''.join([prefix, '.remap.keep.merge.sort.bam.bai'])\n ]\n )\n\n\n\n\n# Perform the final sort and index\ndef sort_and_index_dedupped(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n samtools_sort(args, \n ''.join([prefix, '.remap.keep.merge.rmdup.bam']),\n ''.join([prefix, '.remap.keep.merge.rmdup.sort.bam'])\n )\n samtools_index(''.join([prefix, '.remap.keep.merge.rmdup.sort.bam']))\n subprocess.call(['rm', ''.join([prefix, '.remap.keep.merge.rmdup.bam'])])\n\n\n\n\n# Generate a pileup\ndef pileup(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n subprocess.call(\n [\n 'samtools',\n 'mpileup',\n '-B',\n '-f', args.reference_genome,\n '-l', args.variant_positions_file,\n '-o', ''.join([prefix, '.pileup']),\n ''.join([prefix, '.remap.keep.merge.rmdup.sort.bam']),\n ]\n )\n\n\n\n\n# Perform binomial tests\ndef binomial_test(args, lane_name):\n prefix = '/'.join([args.operating_directory, lane_name, lane_name])\n subprocess.call(\n [\n 'Rscript',\n '/lab/aaylward-shared/wasp-mapping/binom.r',\n prefix,\n '6',\n str(args.false_discovery_rate),\n args.rsid_directory\n ]\n )\n \n\n\n\n#--------------------------- High-level functions -----------------------------#\n\n# Import the relevant lane_names\ndef create_lane_name_list(args):\n lane_names = []\n with open(args.lane_names) as f:\n for line in f:\n lane_names.append(line.replace('\\n', ''))\n return lane_names\n\n\n\n\n# Main\ndef main(args):\n\n # Import the list of samples to use\n lane_names = create_lane_name_list(args)\n \n # Construct the arguments that will be passed to pool for multiprocessing\n starmap_arguments = [(args, lane_name) for lane_name in lane_names]\n \n # If the input files are raw reads in fastq format, carry out the initial\n # alignment process\n if not args.bam_start:\n if not args.rna_seq:\n for lane_name in lane_names:\n initial_bwa_aln(args, lane_name)\n pool = Pool(\n processes=min(\n (\n args.max_processes, \n int(args.memory_limit/5),\n len(lane_names)\n )\n ) \n )\n pool.starmap(initial_bwa_sam, starmap_arguments)\n pool.close()\n pool.join()\n else:\n for lane_name in lane_names:\n initial_star(args, lane_name)\n \n # Sort and index the input BAM files, also filtering for quality\n for lane_name in lane_names:\n initial_sort_and_index(args, lane_name)\n \n # Carry out the \"find intersecting snps\" step\n pool = Pool(\n processes=min(\n (\n args.max_processes, \n int(args.memory_limit/4),\n len(lane_names)\n )\n ) \n )\n pool.starmap(find_intersecting_snps, starmap_arguments)\n pool.close()\n pool.join()\n \n # Remap indicated reads.\n if not args.rna_seq:\n for lane_name in lane_names:\n re_bwa_aln(args, lane_name)\n pool = Pool(\n processes=min(\n (\n args.max_processes,\n int(args.memory_limit/5),\n len(lane_names)\n )\n )\n )\n pool.starmap(re_bwa_sam, starmap_arguments)\n pool.close()\n pool.join()\n else:\n for lane_name in lane_names:\n re_star(args, lane_name)\n \n # Clean up intermediate files, and sort and index the remapped BAM files.\n for lane_name in lane_names:\n clean_up_fq_sai(args, lane_name)\n re_sort_and_index(args, lane_name)\n \n # Fiter the remapped reads and merge them with the kept reads, sort and\n # index the merged files\n pool = Pool(\n processes=min(\n (\n args.max_processes, \n int(args.memory_limit),\n len(lane_names)\n )\n ) \n )\n 
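    # --- Editorial note (added comments; behavior unchanged) ---
    # Pattern used throughout main(): build the (args, lane_name) tuples once
    # and fan them out with Pool.starmap, one worker per lane. The pool size
    # is capped three ways: the user's process limit, an approximate memory
    # budget, and the lane count (extra workers would sit idle). The earlier
    # alignment pools divide memory_limit by 5 to match bwa sampe's 4-5 GB
    # per-process footprint noted above; this filter/merge pool is assumed to
    # be lighter, hence roughly one worker per GB here.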
pool.starmap(filter_remapped_reads_and_merge, starmap_arguments)\n pool.close()\n pool.join()\n for lane_name in lane_names:\n sort_and_index_filtered(args, lane_name)\n \n # Remove duplicate reads, sort and index the dedupped files.\n pool = Pool(\n processes=min(\n (\n args.max_processes,\n len(lane_names)\n )\n ) \n )\n pool.starmap(remove_duplicates, starmap_arguments)\n pool.close()\n pool.join()\n for lane_name in lane_names:\n sort_and_index_dedupped(args, lane_name)\n \n # Generate pileups\n pool = Pool(\n processes=min(\n (\n args.max_processes, \n int(args.memory_limit),\n len(lane_names)\n )\n ) \n )\n pool.starmap(pileup, starmap_arguments)\n pool.close()\n pool.join()\n \n # Perform binomial tests\n pool = Pool(\n processes=min(\n (\n args.max_processes,\n int(args.memory_limit),\n len(lane_names)\n )\n ) \n )\n pool.starmap(binomial_test, starmap_arguments)\n pool.close()\n pool.join()\n\n\n\n\n# Parse arguments\ndef parse_arguments():\n parser = argparse.ArgumentParser(\n description=(\n 'Implementation of the re-mapping procedure to eliminate reference'\n 'bias detailed in WASP.'\n )\n )\n parser.add_argument(\n '--anaconda_path',\n required=True,\n type=str,\n help='Path to Anaconda distribution'\n )\n parser.add_argument(\n '--bam_start',\n required=False,\n action='store_true',\n help='Input files are BAM files, skip initial alignment'\n )\n parser.add_argument(\n '--false_discovery_rate',\n required=True,\n type=float,\n help='Set false discovery rate for binomial tests'\n )\n parser.add_argument(\n '--hdf5_directory',\n required=True,\n type=str,\n help='Path to HDF5 snp files directory'\n )\n parser.add_argument(\n '--lane_names',\n required=True,\n type=str,\n help='Path to text file listing lane_names to be considered'\n )\n parser.add_argument(\n '--max_processes',\n required=True,\n type=int,\n help='Maximum number of processes allowed'\n )\n parser.add_argument(\n '--memory_limit',\n required=True,\n type=int,\n help='Approximate memory limit in gigabytes'\n )\n parser.add_argument(\n '--operating_directory',\n required=True,\n type=str,\n help='Path to operating directory'\n )\n parser.add_argument(\n '--paired_end',\n required=False,\n action='store_true',\n help='Input data is paired-end'\n )\n parser.add_argument(\n '--reference_genome',\n required=True,\n type=str,\n help='Path to reference genome'\n )\n parser.add_argument(\n '--rna_seq',\n required=False,\n action='store_true',\n help='Use splice-aware alignment for RNA-seq reads'\n )\n parser.add_argument(\n '--rsid_directory',\n required=True,\n type=str,\n help='Path to RSID directory'\n )\n parser.add_argument(\n '--STAR_genomeDir',\n required=False,\n type=str,\n help='Path to STAR genome directory'\n )\n parser.add_argument(\n '--STAR_sjdbGTFfile',\n required=False,\n type=str,\n help='Path to GTF file'\n )\n parser.add_argument(\n '--text_based_snp_files',\n required=False,\n action='store_true',\n help='use text-based snp files instead'\n )\n parser.add_argument(\n '--variant_positions_file',\n required=True,\n type=str,\n help='Path to variant positions file'\n )\n parser.add_argument(\n '--wasp_directory',\n required=True,\n type=str,\n help='Path to WASP directory'\n )\n return parser.parse_args()\n\n\n\n\n#--------------------------------- Execute ------------------------------------#\n\nargs = parse_arguments()\nif args.memory_limit < 5:\n raise Exception('Please provide at least 5 GB of 
memory')\nmain(args)\n","sub_path":"wasp_mapping/wasp-mapping.py","file_name":"wasp-mapping.py","file_ext":"py","file_size_in_byte":24264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"295199303","text":"import datetime\n\nfrom django.contrib.admin.util import lookup_field, display_for_field, label_for_field,\\\n quote\nfrom django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,\n ORDER_VAR, PAGE_VAR, SEARCH_VAR)\nfrom django.contrib.admin.templatetags.admin_static import static\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.utils import formats\nfrom django.utils.html import escape, conditional_escape\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import capfirst\nfrom django.utils.translation import ugettext as _\nfrom django.utils.encoding import smart_unicode, force_unicode\nfrom django.template import Library\nfrom django.template.loader import get_template\nfrom django.template.context import Context\nfrom django import template\nfrom django.contrib.admin.templatetags.admin_list import _boolean_icon\n\nregister = template.Library()\n\ndef url_for_result(cl, result):\n obj_id = None\n try:\n obj_id = getattr(result, 'resource_uri')\n except AttributeError:\n obj_id = getattr(result, 'id')\n \n uri = \"%s\" % mark_safe(obj_id)[1:]\n return uri\n\nclass ResultList(list):\n # Wrapper class used to return items in a list_editable\n # changelist, annotated with the form object for error\n # reporting purposes. Needed to maintain backwards\n # compatibility with existing admin templates.\n def __init__(self, form, *items):\n self.form = form\n super(ResultList, self).__init__(*items)\n\ndef results(cl):\n if cl.formset:\n for res, form in zip(cl.result_list, cl.formset.forms):\n yield ResultList(form, items_for_result(cl, res, form))\n else:\n for res in cl.result_list:\n yield ResultList(None, items_for_result(cl, res, None))\n\ndef result_hidden_fields(cl):\n if cl.formset:\n for res, form in zip(cl.result_list, cl.formset.forms):\n if form[cl.model._meta.pk.name].is_hidden:\n yield mark_safe(force_unicode(form[cl.model._meta.pk.name]))\n\ndef result_headers(cl):\n \"\"\"\n Generates the list column headers.\n \"\"\"\n ordering_field_columns = cl.get_ordering_field_columns()\n for i, field_name in enumerate(cl.list_display):\n text, attr = label_for_field(field_name, cl.model,\n model_admin = cl.model_admin,\n return_attr = True\n )\n if attr:\n # Potentially not sortable\n\n # if the field is the action checkbox: no sorting and special class\n if field_name == 'action_checkbox':\n yield {\n \"text\": text,\n \"class_attrib\": mark_safe(' class=\"action-checkbox-column\"'),\n \"sortable\": False,\n }\n continue\n\n admin_order_field = getattr(attr, \"admin_order_field\", None)\n if not admin_order_field:\n # Not sortable\n yield {\n \"text\": text,\n \"sortable\": False,\n }\n continue\n\n # OK, it is sortable if we got this far\n th_classes = ['sortable']\n order_type = ''\n new_order_type = 'asc'\n sort_priority = 0\n sorted = False\n # Is it currently being sorted on?\n if i in ordering_field_columns:\n sorted = True\n order_type = ordering_field_columns.get(i).lower()\n sort_priority = ordering_field_columns.keys().index(i) + 1\n th_classes.append('sorted %sending' % order_type)\n new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]\n\n # build new ordering param\n o_list_primary = [] # URL for making this field the primary 
sort\n o_list_remove = [] # URL for removing this field from sort\n o_list_toggle = [] # URL for toggling order type for this field\n make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)\n\n for j, ot in ordering_field_columns.items():\n if j == i: # Same column\n param = make_qs_param(new_order_type, j)\n # We want clicking on this header to bring the ordering to the\n # front\n o_list_primary.insert(0, param)\n o_list_toggle.append(param)\n # o_list_remove - omit\n else:\n param = make_qs_param(ot, j)\n o_list_primary.append(param)\n o_list_toggle.append(param)\n o_list_remove.append(param)\n\n if i not in ordering_field_columns:\n o_list_primary.insert(0, make_qs_param(new_order_type, i))\n\n\n yield {\n \"text\": text,\n \"sortable\": True,\n \"sorted\": sorted,\n \"ascending\": order_type == \"asc\",\n \"sort_priority\": sort_priority,\n \"url_primary\": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),\n \"url_remove\": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),\n \"url_toggle\": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),\n \"class_attrib\": mark_safe(th_classes and ' class=\"%s\"' % ' '.join(th_classes) or '')\n }\n\n\ndef items_for_result(cl, result, form):\n \"\"\"\n Generates the actual list of data.\n \"\"\"\n first = True\n pk = cl.lookup_opts.pk.attname\n for field_name in cl.list_display:\n row_class = ''\n try:\n f, attr, value = lookup_field(field_name, result, cl.model_admin)\n except (AttributeError, ObjectDoesNotExist):\n result_repr = EMPTY_CHANGELIST_VALUE\n else:\n if f is None:\n if field_name == u'action_checkbox':\n row_class = ' class=\"action-checkbox\"'\n allow_tags = getattr(attr, 'allow_tags', False)\n boolean = getattr(attr, 'boolean', False)\n if boolean:\n allow_tags = True\n result_repr = _boolean_icon(value)\n else:\n result_repr = smart_unicode(value)\n # Strip HTML tags in the resulting text, except if the\n # function has an \"allow_tags\" attribute set to True.\n if not allow_tags:\n result_repr = escape(result_repr)\n else:\n result_repr = mark_safe(result_repr)\n else:\n if isinstance(f.rel, models.ManyToOneRel):\n field_val = getattr(result, f.name)\n if field_val is None:\n result_repr = EMPTY_CHANGELIST_VALUE\n else:\n result_repr = escape(field_val)\n else:\n result_repr = display_for_field(value, f)\n if isinstance(f, models.DateField)\\\n or isinstance(f, models.TimeField)\\\n or isinstance(f, models.ForeignKey):\n row_class = ' class=\"nowrap\"'\n if force_unicode(result_repr) == '':\n result_repr = mark_safe(' ')\n # If list_display_links not defined, add the link tag to the first field\n if (first and not cl.list_display_links) or field_name in cl.list_display_links:\n table_tag = {True:'th', False:'td'}[first]\n first = False\n #url = cl.url_for_result(result)\n url = url_for_result(cl, result)\n # Convert the pk to something that can be used in Javascript.\n # Problem cases are long ints (23L) and non-ASCII strings.\n if cl.to_field:\n attr = str(cl.to_field)\n else:\n attr = pk\n value = result.serializable_value(attr)\n result_id = repr(force_unicode(value))[1:]\n yield mark_safe(u'<%s%s>%s' % \\\n (table_tag, row_class, url, (cl.is_popup and ' onclick=\"opener.dismissRelatedLookupPopup(window, %s); return false;\"' % result_id or ''), conditional_escape(result_repr), table_tag))\n else:\n # By default the fields come from ModelAdmin.list_editable, but if we pull\n # the fields out of the form instead of list_editable custom admins\n # can provide fields on a per request basis\n if 
(form and field_name in form.fields and not (\n field_name == cl.model._meta.pk.name and\n form[cl.model._meta.pk.name].is_hidden)):\n bf = form[field_name]\n result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))\n else:\n result_repr = conditional_escape(result_repr)\n yield mark_safe(u'%s' % (row_class, result_repr))\n if form and not form[cl.model._meta.pk.name].is_hidden:\n yield mark_safe(u'%s' % force_unicode(form[cl.model._meta.pk.name]))\n\n\n@register.inclusion_tag(\"admin/change_list_results.html\")\ndef result_list(cl):\n \"\"\"\n Displays the headers and data list together\n \"\"\"\n headers = list(result_headers(cl))\n num_sorted_fields = 0\n for h in headers:\n if h['sortable'] and h['sorted']:\n num_sorted_fields += 1\n return {'cl': cl,\n 'result_hidden_fields': list(result_hidden_fields(cl)),\n 'result_headers': headers,\n 'num_sorted_fields': num_sorted_fields,\n 'results': list(results(cl))}","sub_path":"accounts/importer/templatetags/admin_custom.py","file_name":"admin_custom.py","file_ext":"py","file_size_in_byte":9458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"108303465","text":"from typing import List\n\nfrom shared.TreeNode import TreeNode\n\n\nclass Solution:\n def inorderTraversal(self, root: TreeNode) -> List[int]:\n return inorder_iterative(root)\n # return inorder_rec(root)\n\n\ndef inorder_iterative(root: TreeNode) -> List[int]:\n # 32 ms\t13.8 MB\n if not root:\n return []\n\n result = []\n\n stack = [root]\n is_prev_node_traversed = False\n while len(stack) != 0:\n node = stack[-1]\n if not is_prev_node_traversed:\n while node.left:\n stack.append(node.left)\n node = node.left\n\n node = stack.pop()\n result.append(node.val)\n\n if node.right:\n stack.append(node.right)\n is_prev_node_traversed = False\n else:\n is_prev_node_traversed = True\n\n return result\n\n\ndef inorder_rec(root: TreeNode) -> List[int]:\n # 28 ms\t13.9 MB\n result = []\n\n def i(node: TreeNode):\n if node is None:\n return\n i(node.left)\n result.append(node.val)\n i(node.right)\n\n i(root)\n return result\n\n\nif __name__ == '__main__':\n s = Solution()\n node = TreeNode(1, right=TreeNode(2, left=TreeNode(3)))\n print(s.inorderTraversal(node))\n","sub_path":"python/094.py","file_name":"094.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"6633612","text":"from i8自动化测试框架.comment.pageDriver import Action\nfrom i8自动化测试框架.comment import getValueFromExcel\nfrom selenium.webdriver.common.by import By\nimport time\n\n\n\ntimestamp = str(int(round(time.time() * 1000)))\n\nclass itm3_tax_as_sale(Action):\n def __init__(self, driver=None):\n Action.__init__(self, driver)\n self.fileName = \"mi8-收入确认单.xls\"\n self.projectName = \"mi8收入确认单\"\n\n def getValue_d(self, x, y):\n value = getValueFromExcel.getExcelValue(self.fileName, x, y, sheetName=\"itm3_tax_as_sale_det\")\n return value\n\n def getValue_m(self, x, y):\n value = getValueFromExcel.getExcelValue(self.fileName, x, y, sheetName=\"itm3_tax_as_sale\")\n return value\n\n def test_收入确认单(self):\n try:\n self.getUrl(\"ITM\", \"收入确认单\")\n self.driver.implicitly_wait(5)\n self.maxWindows()\n self.driver.implicitly_wait(2)\n self.switchFrame(By.XPATH, \"/html[1]/body[1]/div[4]/div[2]/div[2]/div[1]/span[1]/div[1]/iframe[1]\")\n self.driver.implicitly_wait(1)\n time.sleep(5)\n creatNew = self.addNewForm() # 新增\n if creatNew:\n self.driver.implicitly_wait(2)\n 
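                # --- Editorial note (added comments; behavior unchanged) ---
                # Once the "新增" (create new) click succeeds, the edit form opens
                # inside a different iframe, so the test must first switch back to
                # the parent document and only then into the form's iframe before
                # any header field can be located. Fixed time.sleep() calls are
                # used here to wait for the iframe content to render; an explicit
                # WebDriverWait on the target element would be more robust.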
self.switchToParentFrame()\n self.switchFrame(By.XPATH, \"/html[1]/body[1]/div[4]/div[2]/div[3]/div[1]/span[1]/div[1]/iframe[1]\")\n time.sleep(2)\n #####################################################################\n # 表头数据获取\n PhidFeetype = self.getValue_m(5, 5)\n Taxno = self.getValue_m(5, 6)\n PhidTaxOrg = self.getValue_m(5, 7)\n PhidProj = self.getValue_m(5, 10)\n Remaker = self.getValue_m(5, 14)\n #####################################################################\n time.sleep(2)\n self.clickElement(\"xpath\", \"//input[@name='PhidFeetype']/../../td[2]/div\")\n time.sleep(1)\n PhidFeetypeElement = \"//td[contains(text(),'%s')]\" % PhidFeetype\n self.clickElement(\"xpath\", PhidFeetypeElement)\n time.sleep(1)\n self.clickElement(\"xpath\", \"//input[@name='PhidTaxOrg']/../../td[2]/div\")\n time.sleep(1)\n self.simpleHelpChoose(PhidTaxOrg)\n time.sleep(1)\n # self.projectHelpChoose(PhidProj, By.NAME, \"PhidProj\")\n self.Submit(PhidProj, By.NAME, \"PhidProj\")\n time.sleep(1)\n #####################################################################\n # 明细数据获取\n PhidImposeName = self.getValue_d(5, 3)\n Havemoney = self.getValue_d(5, 4)\n TaxRate = self.getValue_d(5, 5)\n Amount = self.getValue_d(5, 6)\n Taxes = self.getValue_d(5, 7)\n #####################################################################\n time.sleep(1)\n self.clickElement(\"xpath\", \"//span[contains(text(),'增行')]\")\n time.sleep(1)\n self.clickElement(\"xpath\",\n \"/html[1]/body[1]/div[2]/div[1]/div[3]/div[2]/div[1]/table[1]/tbody[1]/tr[1]/td[6]/div[1]\")\n time.sleep(1)\n self.clickElement(\"xpath\", \"//input[@name='PhidImposeName']/../../td[2]/div\")\n time.sleep(1)\n self.simpleHelpChoose(PhidImposeName)\n time.sleep(1)\n self.clickElement(\"xpath\",\n \"/html[1]/body[1]/div[2]/div[1]/div[3]/div[2]/div[1]/table[1]/tbody[1]/tr[1]/td[7]/div[1]\")\n time.sleep(1)\n self.sendKeysByJs(Havemoney, By.NAME, \"Havemoney\", index=1)\n time.sleep(1)\n self.clickElement(\"xpath\",\n \"/html[1]/body[1]/div[2]/div[1]/div[3]/div[2]/div[1]/table[1]/tbody[1]/tr[1]/td[8]/div[1]\")\n time.sleep(1)\n self.clickElement(\"xpath\", \"//table[@class='x-form-trigger-wrap']//input[@name='TaxRate']\")\n TaxRateElement = \"//td[contains(text(),'%s')]\" % TaxRate\n time.sleep(1)\n self.clickElement(\"xpath\", TaxRateElement)\n time.sleep(1)\n self.clickElement(\"xpath\", \"//span[contains(text(),'确定')]/..//span[2]\")\n time.sleep(1)\n # 接下来就是单据保存核准的操作\n self.clickElement(\"xpath\", \"//span[contains(text(),'保存')]/../span[2]\")\n message = self.getText(By.XPATH, \"//div[contains(@class,'x-form-display-field')]\")\n if message.startswith(\"保存成功\"):\n self.clickElement(\"xpath\", \"//span[contains(text(),'确定')]/..//span[2]\")\n self.clickElements(By.XPATH, \"//span[contains(text(),'审核')]\", index=0)\n message2 = self.driver.find_element_by_id(\"messagebox-1001-displayfield-inputEl\").text\n if message2.startswith(\"审核成功!\"):\n self.logger.debug(\"当前单据:%s 审核成功!,即将执行excel回填操作\" % self.projectName)\n # saveValueToExcel.savaValueToExcel()\n else:\n self.logger.war(\"当前单据:%s 未正常核准,提示框信息为:%s\" % (self.projectName, message))\n self.clickElement(\"xpath\", \"//span[contains(text(),'确定')]/..//span[2]\")\n else:\n self.logger.error(\"%s单据保存失败,提示信息为%s\" % (self.projectName, message))\n else:\n self.logger.error(\"单据新增时出错,请检查%s是否正常\" % self.projectName)\n except:\n self.logger.error(\"单据操作时出现异常,请检查:%s是否正常\" % 
self.projectName)\n\n\n\n\n\n","sub_path":"i8自动化测试框架/case/MI8/test_收入确认单.py","file_name":"test_收入确认单.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"537231669","text":"#!/usr/bin/python3\n'''\nModule where the integers representing the Pascal’s triangle\n'''\n\n\ndef pascal_triangle(n):\n '''Pascal priniting functions'''\n if n <= 0:\n return []\n\n pas_r = [[1]]\n if n > 1:\n pas_r.append([1, 1])\n for ind in range(3, n + 1):\n pas_r.append([1] + list(map(\n lambda i: pas_r[ind - 2][i] + pas_r[ind - 2][i + 1], range(\n len(pas_r[ind - 2]) - 1))) + [1])\n return pas_r\n","sub_path":"0x1F-pascal_triangle/0-pascal_triangle.py","file_name":"0-pascal_triangle.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"77799941","text":"\"\"\"\nThis module work to help load data to sqlite.\nusage:\n#\nimport lib.sqlite_command as sql\n#\nclass:\n\nfunction:\n check_sql_table\n check_table_columns\n init_sql_db\n init_sql_db_many_table\n insert_one_record_to_sql_table\n dict2sql_table\n sql_table2dict\n sql_table_row_num\nlog:\n Yuxing Xu\n 2018.02.05 new module\n 2018.02.05 update module\n\"\"\"\nimport sqlite3\nfrom retry import retry\n\n\ndef check_sql_table(db_file):\n conn = sqlite3.connect(db_file)\n table_list = conn.execute('SELECT name FROM sqlite_master WHERE type=\\'table\\'').fetchall()\n conn.close()\n\n output = []\n for i in table_list:\n output.append(i[0])\n return tuple(output)\n\n\ndef check_table_columns(db_file, table_name):\n conn = sqlite3.connect(db_file)\n content = conn.execute('PRAGMA table_info(' + table_name + ')').fetchall()\n conn.close()\n\n table_head = []\n for i in content:\n table_head.append(i[1])\n return tuple(table_head)\n\n\ndef all_database_stat(db_file):\n conn = sqlite3.connect(db_file)\n table_list = conn.execute('SELECT name FROM sqlite_master WHERE type=\\'table\\'').fetchall()\n\n output = []\n for i in table_list:\n output.append(i[0])\n output = tuple(output)\n\n db_stat = {}\n for i in output:\n table_record_num = conn.execute('SELECT count(*) FROM ' + i).fetchall()\n db_stat[i] = table_record_num[0][0]\n\n conn.close()\n return db_stat\n\n\ndef init_sql_db(db_file, table_name, column_name_list):\n import os\n from lib.shell_lib.base_function import cmd_run\n if os.path.exists(db_file):\n cmd_run(\"rm %s\" % db_file)\n\n conn = sqlite3.connect(db_file)\n\n create_cmd = '''CREATE TABLE ''' + table_name + \" (\"\n for column_name in column_name_list:\n create_cmd = create_cmd + \"\\\"\" + column_name + \"\\\", \"\n create_cmd = create_cmd.rstrip(\", \")\n create_cmd = create_cmd + \")\"\n conn.execute(create_cmd)\n\n conn.commit()\n conn.close()\n\n\ndef init_sql_db_many_table(db_file, table_columns_dict):\n import os\n from lib.shell_lib.base_function import cmd_run\n if os.path.exists(db_file):\n cmd_run(\"rm %s\" % db_file)\n\n conn = sqlite3.connect(db_file)\n\n for table_name in table_columns_dict:\n create_cmd = '''CREATE TABLE ''' + table_name + \" (\"\n for column_name in table_columns_dict[table_name]:\n create_cmd = create_cmd + column_name + \", \"\n create_cmd = create_cmd.rstrip(\", \")\n create_cmd = create_cmd + \")\"\n conn.execute(create_cmd)\n\n conn.commit()\n conn.close()\n\n\ndef insert_one_record_to_sql_table(data_tuple, columns_list_tuple, db_file, table_name):\n conn = sqlite3.connect(db_file)\n insert_cmd = '''INSERT INTO ''' + 
table_name + \" \" + tuple(columns_list_tuple).__str__() \\\n + ''' VALUES ''' + tuple(data_tuple).__str__()\n conn.execute(insert_cmd)\n conn.commit()\n conn.close()\n\n\ndef dict2sql_table(data_dict, table_columns_dict, db_file, table_name):\n column_name = table_columns_dict[table_name]\n\n if len(column_name) != len(data_dict[0]):\n raise ValueError\n\n def iter_sql_insert(data_dict):\n for i in data_dict:\n table_insert = data_dict[i]\n yield tuple(table_insert)\n\n conn = sqlite3.connect(db_file)\n insert_cmd = '''INSERT INTO ''' + table_name + ''' VALUES (''' + \"?,\" * len(column_name)\n insert_cmd = insert_cmd.rstrip(\",\")\n insert_cmd = insert_cmd + \")\"\n conn.executemany(insert_cmd, iter_sql_insert(data_dict))\n conn.commit()\n conn.close()\n\n\ndef sql_table2dict(db_file, table_name, top=0, key=\"\", print_if=False):\n table_head = check_table_columns(db_file, table_name)\n\n conn = sqlite3.connect(db_file)\n if not top == 0:\n content = conn.execute('SELECT * FROM ' + table_name + ' LIMIT ?', (top,)).fetchall()\n else:\n content = conn.execute('SELECT * FROM ' + table_name).fetchall()\n\n conn.close()\n\n output_dict = {}\n num = 0\n for i in content:\n num = num + 1\n if key == \"\":\n output_dict[num] = i\n else:\n key_value = i[table_head.index(key)]\n output_dict[key_value] = i\n\n if print_if is True:\n printer = \"\"\n for i in table_head:\n printer = printer + i + \"\\t\"\n printer = printer.rstrip(\"\\t\") + \"\\n\" + \"---\" + \"\\n\"\n for i in content:\n for j in i:\n printer = printer + str(j) + \"\\t\"\n printer = printer.rstrip(\"\\t\") + \"\\n\"\n\n return output_dict, table_head\n\n\ndef sql_table_row_num(db_file, table_name):\n conn = sqlite3.connect(db_file)\n record_len = conn.execute(\"select count(*) from %s\" % table_name).fetchall()[0][0]\n conn.close()\n return record_len\n\n\ndef dict_context_yield(list_input):\n for query in list_input:\n yield query\n\n\n@retry()\ndef sqlite_write(record_list, db_file, table_name, columns_list):\n \"\"\"\n :param record_list:\n record_list=[(\"A1\",\"B1\",\"C1\",\"D1\",\"E1\"),(\"A2\",\"B2\",\"C2\",\"D2\",\"E2\"),...]\n :param db_file:\n :param table_name: table should be inited\n :param columns_list:\n columns_list = [\"A\",\"B\",\"C\",\"D\",\"E\"]\n :return:\n \"\"\"\n conn = sqlite3.connect(db_file)\n insert_cmd = '''INSERT INTO ''' + table_name + ''' VALUES (''' + \"?,\" * len(columns_list)\n insert_cmd = insert_cmd.rstrip(\",\")\n insert_cmd = insert_cmd + \")\"\n\n conn.executemany(insert_cmd, dict_context_yield(record_list))\n\n conn.commit()\n conn.close()\n\n\ndef sqlite_select_by_a_key(db_file, table_name, key_name, value_tuple):\n conn = sqlite3.connect(db_file)\n if len(value_tuple) == 1:\n content = conn.execute(\n \"SELECT * FROM %s WHERE \\\"%s\\\" = ?\" % (table_name, key_name), value_tuple).fetchall()\n else:\n content = conn.execute(\n \"SELECT * FROM %s WHERE \\\"%s\\\" IN \" % (table_name, key_name) + value_tuple.__str__()).fetchall()\n conn.close()\n return content\n","sub_path":"lib/database/sqlite_command.py","file_name":"sqlite_command.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"317815337","text":"from django.shortcuts import render\nfrom django_auth_ldap.config import LDAPSearch\nfrom django.db import connections\nfrom django.shortcuts import render_to_response\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth 
import login, authenticate\nfrom .dict import *\nfrom .utils import *\nfrom .MISModel import *\nfrom django.db.models import Subquery\nfrom django.contrib.auth.models import User, Group\nfrom django.http import HttpResponse\nimport requests, uuid, cx_Oracle, json , os, logging\nimport ldap, unicodedata, django.contrib.auth, datetime, schedule, time, threading, base64\nfrom django.conf import settings \nfrom django import template\n\n\n# Create your views here.\ndef initPage (request):\n return redirect('Login')\n\ndef Login(request):\n db_logger = logging.getLogger('db')\n # ldap.PORT = 8433\n ldap.PORT = 636\n\n # connect to HCNET\n # con = ldap.initialize('LDAP://vnhqpdc03.hcnet.vn:389')\n\n # connect to HCG\n # con = ldap.initialize('LDAP://vn-bw1001.hcg.homecredit.net')\n con = ldap.initialize('LDAP://vn-ldaps.hcg.homecredit.net')\n\n if request.method == \"POST\":\n # HCNET\n # base = 'ou=hcnet_users,dc=hcnet,dc=vn' \n\n # HCG\n base = 'ou=Users,ou=VN,dc=hcg,dc=homecredit,dc=net'\n\n search = ['displayName','mail','givenName','sn','sAMAccountName','description','mobile','department', 'userPrincipalName','name/cn']\n scope = ldap.SCOPE_SUBTREE\n username = request.POST.get('username')\n # this password is already encoded\n password = request.POST.get('password') \n # decoded password\n password = base64.b64decode(password).decode(\"utf-8\")\n\n binddn = \"%s\\%s\" % ('hcg',username)\n try:\n con.bind_s(binddn, password) \n result = con.search_s(base, int(scope),\"sAMAccountName=%s\" % username, search)\n user_hcg = result[0][1]['sAMAccountName'][0].decode('utf-8')\n \n\n if User.objects.filter(username=username.lower()).count() != 0:\n user = User.objects.get(username=username.lower())\n\n info = findUserGroup(user_hcg)\n \n # get section name\n section = info[9]\n # get title\n title = info[8]\n # get department\n department = info[13]\n # get gender\n gender = info[7]\n\n # update role for user\n have_role = OMIS_UserRole.objects.filter(user_id=user.id).values()\n\n if len(have_role) == 0:\n try:\n if ('manager' in title.lower()) or ('supervisor' in title.lower()) or ('head of' in title.lower()):\n role = OMIS_Role.objects.get(role='Supervisor')\n elif 'leader' in title.lower():\n role = OMIS_Role.objects.get(role='Team Leader')\n elif 'mis' in section.lower():\n role = OMIS_Role.objects.get(role='Admin')\n else:\n role = OMIS_Role.objects.get(role='Operator')\n\n last_record = OMIS_UserRole.objects.all().order_by('id').last()\n user_role_id = increment_id(last_record)\n user_role = OMIS_UserRole(id=user_role_id, role_id=role.id, user_id=user.id) \n user_role.save()\n\n # store user role to session\n request.session['role'] = role.priority\n except Exception as e:\n db_logger.exception(e)\n write_log(e, 'System', 'Function: Login - Save user role: ' + user_hcg)\n return HttpResponse('Error')\n else:\n role = OMIS_Role.objects.get(pk=have_role[0]['role_id'])\n request.session['role'] = role.priority\n\n request.session.modified = True\n # store use info to session\n request.session['displayName'] = result[0][1]['sAMAccountName'][0].decode('utf-8')\n request.session['title'] = title\n request.session['fullName'] = (result[0][1]['sn'][0].decode('utf-8')\n + ' ' + result[0][1]['givenName'][0].decode('utf-8'))\n request.session['department'] = department\n request.session['section'] = section\n request.session['gender'] = gender\n\n # store user group in session\n groupList = []\n user = User.objects.get(username=username.lower())\n group = list(user.groups.all())\n for i in 
range(len(group)):\n name = str(group[i])\n groupList.append(name)\n request.session['group'] = groupList\n \n \n # store default language in session\n if 'lang' not in request.session:\n content = languageContent(\"en\")\n request.session['lang'] = \"en\"\n request.session['lang_content'] = content \n else:\n info = findUserGroup(user_hcg)\n if info is not None:\n # get first name\n firstname = info[1]\n # get last name\n lastname = info[2]\n # get section name\n section = info[9]\n # get title\n title = info[8]\n # get department\n department = info[13]\n # get gender\n gender = info[7]\n\n email = result[0][1]['mail'][0].decode('utf-8') \n \n User.objects.create_user(username=username.lower(), password=password,email=email, first_name = firstname, last_name = lastname)\n created_user = User.objects.get(username=username.lower()) \n # Add user to group\n if 'customer service' in section.lower():\n if 'digital customer service' in title.lower():\n digit_cs = Group.objects.get(name='Customer Care General') \n digit_cs.user_set.add(created_user)\n else:\n cs = Group.objects.get(name='Customer Service') \n cs.user_set.add(created_user)\n elif 'contract processing' in section.lower():\n cp = Group.objects.get(name='Contract Processing') \n cp.user_set.add(created_user) \n elif 'call center' in section.lower():\n cc = Group.objects.get(name='Call Center') \n cc.user_set.add(created_user)\n elif 'internal service' in section.lower():\n internal_service = Group.objects.get(name='Internal Service') \n internal_service.user_set.add(created_user)\n elif 'operations process & quality assurance' in department.lower():\n qa = Group.objects.get(name='QA & Process') \n qa.user_set.add(created_user)\n elif 'mis' in section.lower():\n mis = Group.objects.get(name='Admin') \n mis.user_set.add(created_user)\n\n # Add role for user\n try:\n if ('manager' in title.lower()) or ('supervisor' in title.lower()) or ('head of' in title.lower()):\n role = OMIS_Role.objects.get(role='Supervisor')\n elif 'leader' in title.lower():\n role = OMIS_Role.objects.get(role='Team Leader')\n elif 'mis' in section.lower():\n role = OMIS_Role.objects.get(role='Admin')\n else:\n role = OMIS_Role.objects.get(role='Operator')\n\n last_record = OMIS_UserRole.objects.all().order_by('id').last()\n user_role_id = increment_id(last_record)\n user_role = OMIS_UserRole(id=user_role_id, role_id=role.id, user_id=created_user.id) \n user_role.save()\n except Exception as e:\n db_logger.exception(e)\n write_log(e, 'System', 'Function: Login - Save user role: ' + user_hcg)\n return redirect('Error')\n\n # Add user info to session\n request.session.modified = True\n # store user role to session\n request.session['role'] = role.priority\n # store use info to session\n request.session['displayName'] = result[0][1]['sAMAccountName'][0].decode('utf-8')\n request.session['title'] = title\n request.session['fullName'] = (result[0][1]['sn'][0].decode('utf-8')\n + ' ' + result[0][1]['givenName'][0].decode('utf-8'))\n request.session['department'] = department\n request.session['section'] = section\n request.session['gender'] = gender\n\n # store user group in session\n groupList = []\n user = User.objects.get(username=username.lower())\n group = list(user.groups.all())\n for i in range(len(group)):\n name = str(group[i])\n groupList.append(name)\n request.session['group'] = groupList\n \n \n # store default language in session\n if 'lang' not in request.session:\n content = languageContent(\"en\")\n request.session['lang'] = \"en\"\n 
request.session['lang_content'] = content \n\n else:\n return render(request, 'auth-signin.html')\n\n return redirect('Homepage')\n\n except ldap.INVALID_CREDENTIALS:\n return render(request, 'auth-signin.html')\n except ldap.INVALID_DN_SYNTAX:\n return render(request, 'auth-signin.html')\n except ldap.INVALID_SYNTAX:\n return render(request, 'auth-signin.html') \n else:\n return render(request, 'auth-signin.html')\n\n\ndef Logout(request):\n request.session.flush()\n db_logger = logging.getLogger('db')\n db_conn = connections['oracle']\n cursor = db_conn.cursor()\n cursor.close()\n db_conn.commit()\n return render(request, 'auth-signin.html')\n\n\n\n\n@csrf_exempt\ndef changeLanguage(request):\n if request.is_ajax():\n data = json.loads(request.body)\n lang = data[\"lang\"] \n content = languageContent(lang)\n request.session['lang'] = lang\n request.session['lang_content'] = content \n return HttpResponse(lang)\n else:\n isLogged = request.session.get('displayName', 'empty')\n if isLogged == 'empty':\n return render(request, 'auth-signin.html')\n else:\n return redirect('Error')\n\n\ndef findUserGroup(user_hcg):\n db_logger = logging.getLogger('db')\n db_conn = connections['oracle']\n cursor = db_conn.cursor()\n try:\n cursor.execute(\"SELECT * FROM T_OPS_USER WHERE HCG_USERNAME = '{0}' AND STATUS = 1 AND ROWNUM = 1\".format(user_hcg))\n row = cursor.fetchone()\n return row\n except Exception as e:\n error_time = datetime.datetime.now()\n db_logger.exception(e)\n write_log(e, 'System', 'Function: findUserGroup - Find Login User Info: ' + user_hcg)\n return redirect('Error')\n finally:\n cursor.close()\n\n","sub_path":"Login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"430100105","text":"import RPi.GPIO as GPIO\nimport time\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\n\n#Central Flame Sensor\nflameCenter = 38\nGPIO.setup(flameCenter, GPIO.IN)\n\n#Left Flame Sensor\nflameLeft = 37\nGPIO.setup(flameLeft, GPIO.IN)\n\n#Right Flame Sensor\nflameRight = 40\nGPIO.setup(flameRight, GPIO.IN)\n\n#Water Pump\npSwitch = 8\nGPIO.setup(pSwitch, GPIO.OUT)\n\n#UltraSound Sensor Left\nTRIG1 = 29\nECHO1 = 31\n\nGPIO.setup(TRIG1,GPIO.OUT)\nGPIO.setup(ECHO1,GPIO.IN)\n\n\n#UltraSound Sensor Right\nTRIG3 = 33\nECHO3 = 35\n\nGPIO.setup(TRIG3,GPIO.OUT)\nGPIO.setup(ECHO3,GPIO.IN)\n\n\n#Motor Controller\nmotor1F = 11\nmotor1B = 7\nmotor1E = 22\n\nmotor2F = 16\nmotor2B = 13\nmotor2E = 15\n\nGPIO.setup(motor1F,GPIO.OUT)\nGPIO.setup(motor1B,GPIO.OUT)\nGPIO.setup(motor1E,GPIO.OUT)\nGPIO.setup(motor2F,GPIO.OUT)\nGPIO.setup(motor2B,GPIO.OUT)\nGPIO.setup(motor2E,GPIO.OUT)\n\npwm1=GPIO.PWM(22,100)\npwm1.start(100)\n\npwm2=GPIO.PWM(15,100)\npwm2.start(100)\n\n# Function to move forward\ndef forward(L,R):\n pwm1.ChangeDutyCycle(L)\n pwm2.ChangeDutyCycle(R)\n GPIO.output(motor1F,GPIO.HIGH)\n GPIO.output(motor1B,GPIO.LOW)\n GPIO.output(motor1E,GPIO.HIGH)\n GPIO.output(motor2F,GPIO.HIGH)\n GPIO.output(motor2B,GPIO.LOW)\n GPIO.output(motor2E,GPIO.HIGH)\n\n# Function to move backward\ndef backward(L,R):\n pwm1.ChangeDutyCycle(L)\n pwm2.ChangeDutyCycle(R)\n GPIO.output(motor1F,GPIO.LOW)\n GPIO.output(motor1B,GPIO.HIGH)\n GPIO.output(motor1E,GPIO.HIGH)\n GPIO.output(motor2F,GPIO.LOW)\n GPIO.output(motor2B,GPIO.HIGH)\n GPIO.output(motor2E,GPIO.HIGH)\n\n# Function to move right\ndef right(L,R):\n pwm1.ChangeDutyCycle(L)\n pwm2.ChangeDutyCycle(R)\n GPIO.output(motor1F,GPIO.HIGH)\n 
GPIO.output(motor1B,GPIO.LOW)\n GPIO.output(motor1E,GPIO.HIGH)\n GPIO.output(motor2F,GPIO.LOW)\n GPIO.output(motor2B,GPIO.HIGH)\n GPIO.output(motor2E,GPIO.HIGH)\n\n# Function to move right while moving forward\ndef forwRight(L,R):\n pwm1.ChangeDutyCycle(L)\n pwm2.ChangeDutyCycle(R)\n GPIO.output(motor1F,GPIO.HIGH)\n GPIO.output(motor1B,GPIO.LOW)\n GPIO.output(motor1E,GPIO.HIGH)\n GPIO.output(motor2F,GPIO.HIGH)\n GPIO.output(motor2B,GPIO.LOW)\n GPIO.output(motor2E,GPIO.HIGH)\n\n# Function to move right while moving backward\ndef backRight():\n pwm1.ChangeDutyCycle(100)\n pwm2.ChangeDutyCycle(50)\n GPIO.output(motor1F,GPIO.LOW)\n GPIO.output(motor1B,GPIO.HIGH)\n GPIO.output(motor1E,GPIO.HIGH)\n GPIO.output(motor2F,GPIO.LOW)\n GPIO.output(motor2B,GPIO.HIGH)\n GPIO.output(motor2E,GPIO.HIGH)\n\n# Function to move left\ndef left(L,R):\n pwm1.ChangeDutyCycle(L)\n pwm2.ChangeDutyCycle(R)\n GPIO.output(motor1F,GPIO.LOW)\n GPIO.output(motor1B,GPIO.HIGH)\n GPIO.output(motor1E,GPIO.HIGH)\n GPIO.output(motor2F,GPIO.HIGH)\n GPIO.output(motor2B,GPIO.LOW)\n GPIO.output(motor2E,GPIO.HIGH)\n\n# Function to move left while moving forward\ndef forwLeft(L,R):\n pwm1.ChangeDutyCycle(L)\n pwm2.ChangeDutyCycle(R)\n GPIO.output(motor1F,GPIO.HIGH)\n GPIO.output(motor1B,GPIO.LOW)\n GPIO.output(motor1E,GPIO.HIGH)\n GPIO.output(motor2F,GPIO.HIGH)\n GPIO.output(motor2B,GPIO.LOW)\n GPIO.output(motor2E,GPIO.HIGH)\n\n# Function to move left while moving backward\ndef backLeft():\n pwm1.ChangeDutyCycle(40)\n pwm2.ChangeDutyCycle(100)\n GPIO.output(motor1F,GPIO.LOW)\n GPIO.output(motor1B,GPIO.HIGH)\n GPIO.output(motor1E,GPIO.HIGH)\n GPIO.output(motor2F,GPIO.LOW)\n GPIO.output(motor2B,GPIO.HIGH)\n GPIO.output(motor2E,GPIO.HIGH)\n\n# Function to stop the motor\ndef stop():\n GPIO.output(motor1F,GPIO.LOW)\n GPIO.output(motor1B,GPIO.LOW)\n GPIO.output(motor1E,GPIO.LOW)\n GPIO.output(motor2F,GPIO.LOW)\n GPIO.output(motor2B,GPIO.LOW)\n GPIO.output(motor2E,GPIO.LOW)\n\n\ndef ultraSoundLeft():\n sig_time = 0 # signal time, default = 0\n start = time.time() # receives the start time of the signal sent by the ultrasound\n end = time.time() # receives the end time when the signal is received by the ultrasound\n \n GPIO.output(TRIG1, True) # send a signal through the trigger output\n time.sleep(0.00001) # Time to wait before stopping the output, default = 0.00001 \n GPIO.output(TRIG1, False) # stop sending a signal through the trigger output\n\n while GPIO.input(ECHO1) == False:\n start = time.time() # save a value to the start time when the signal of the echo is not received\n\n while GPIO.input(ECHO1) == True:\n end = time.time() # save a value to the end time when the signal of the echo is received\n\n sig_time = end-start # time the signal took to go and return\n\n #CM:\n distanceLeft = sig_time / 0.000058 # calculates the distance that the sound travelled in cm, V = dDistance / dTime\n\n #print('Left Distance: {} centimeters'.format(distanceLeft))\n return distanceLeft\n\n\n\n'''def ultraSoundCenter():\n sig_time = 0\n start = time.time()\n end = time.time()\n \n GPIO.output(TRIG2, True)\n time.sleep(0.00001)\n GPIO.output(TRIG2, False)\n\n while GPIO.input(ECHO2) == False:\n start = time.time()\n\n while GPIO.input(ECHO2) == True:\n end = time.time()\n\n sig_time = end-start\n\n #CM:\n distanceCenter = sig_time / 0.000058\n\n# print('Center Distance: {} centimeters'.format(distanceCenter))\n return distanceCenter'''\n\ndef ultraSoundRight():\n sig_time = 0\n start = time.time()\n end = time.time()\n \n GPIO.output(TRIG3, True)\n 
time.sleep(0.00001)\n GPIO.output(TRIG3, False)\n\n while GPIO.input(ECHO3) == False:\n start = time.time()\n\n while GPIO.input(ECHO3) == True:\n end = time.time()\n\n sig_time = end-start\n\n #CM:\n distanceRight = sig_time / 0.000058\n\n #print('Right Distance: {} centimeters'.format(distanceRight))\n return distanceRight\n\ndef flameCenter():\n return GPIO.input(38)\n\ndef flameLeft():\n return GPIO.input(37)\n\ndef flameRight():\n return GPIO.input(40)\n\ndef waterPump():\n GPIO.output(pSwitch, True)\n time.sleep(1) # Sprays the waterPump for time, default = 1 second\n GPIO.output(pSwitch, False)\n\n\n\n#We will call this method when the program quits to reset the pwm and cleanup the GPIO so the robot does not immediately resume the last run\ndef resetBot(): \n pwm1.stop()\n pwm2.stop()\n GPIO.cleanup() # Resets the value of all GPIO pins to 0.\n","sub_path":"motorController.py","file_name":"motorController.py","file_ext":"py","file_size_in_byte":6352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"69911182","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport sys,os,ipdb\nimport ot_utils as ot\n\n\"\"\"Name: \n Description: config file to run QA/QC of datasets\n Notes:\n\"\"\"\n\n#inputs:\n#----------------------------------------------------------------------\n#Best to name working directory as an OT shortname. Assumes you are\n#running this from the scripts directory..\ningestBase = os.path.dirname(os.getcwd())\nshortname = os.path.basename(ingestBase)\nbounds_base = os.path.join(ingestBase,'bounds')\nlog_dir = os.path.join(ingestBase,'logs')\nscripts_dir = os.path.join(ingestBase,'scripts')\n#----------------------------------------------------------------------\n\n\n#Config file for Converting LAS files to LAZ\n#----------------------------------------------------------------------\n#module to initialize the config file to all null values.\nconfig1 = ot.initializeNullConfig()\n\nconfig1['log_dir'] = log_dir\nconfig1['ingestLog'] = os.path.join(log_dir,shortname+'_LAS2LAZ_QAQCLog.txt')\nconfig1['LAS2LAZ'] = 1\nconfig1['LAS2LAZ_method'] = 'pdal'\nconfig1['getFilesWild'] = '.*\\.las$'\nconfig1['getFilesDir'] = '/Volumes/New Volume/ToOT_HD35/2018_13_265_Feehan/_Deliverables/PCTiles'\nconfig1['ftype'] = 'f'\nconfig1['recursive'] = 0\nconfig1['LAZDir_out'] = '/volumes/OT6TB/CA18_Feehan/LAZ'\nconfig1['pipeline'] = os.path.join(scripts_dir,'pipeline.json')\n\n#Run Module to Convert LAS2LAS...\n#ot.RunQAQC(config1)\n#----------------------------------------------------------------------\n\n#Config file for initial check of LAS files....\n#----------------------------------------------------------------------\n#module to initialize the config file to all null values\nconfig2 = ot.initializeNullConfig()\n\nconfig2['log_dir'] = log_dir\nconfig2['ingestLog'] = os.path.join(log_dir,shortname+'_initialCheck_QAQCLog.txt')\nconfig2['recursive'] = 0\nconfig2['getFilesDir'] = '/volumes/OT6TB/CA17_Dietrich/2017_LAS_Tiles'\nconfig2['getFilesWild'] = '.*\\.las$'\nconfig2['ftype'] = 'f'\nconfig2['CreatePDALInfo'] = 1\nconfig2['PDALInfoFile'] = shortname+'_PDALInfoLog_initial.txt'\nconfig2['ReadPDALLog'] = 1\nconfig2['CheckLAZCount'] = 1\nconfig2['MissingHCRS'] = 1\nconfig2['MissingVCRS'] = 1\nconfig2['HCRS_Uniform'] = 1\nconfig2['VCRS_Uniform'] = 1\nconfig2['VersionCheck'] = 1\nconfig2['PointTypeCheck'] = 1\nconfig2['GlobalEncodingCheck'] = 1\nconfig2['PointCountCheck'] = 1\nconfig2['CreatePDALBoundary'] = 
1\nconfig2['bounds_PDAL'] = os.path.join(bounds_base,'PDAL.shp')\nconfig2['BufferSize'] = 1\nconfig2['epsg'] = 6339\nconfig2['bounds_PDALmerge'] = os.path.join(bounds_base,'PDALMerged.shp')\nconfig2['bounds_PDALmergeArea'] = os.path.join(bounds_base,'PDALMergedwArea.shp')\nconfig2['bounds_PDALKML'] = os.path.join(bounds_base,'PDALMergedwArea.kml')\nconfig2['winePath'] = '/Applications/LASTools/bin'\nconfig2['CreateLASBoundary'] = 1\nconfig2['bounds_LT'] = os.path.join(bounds_base,'LTBounds.shp')\nconfig2['randFrac'] = 0.25\nconfig2['concavity'] = 100\nconfig2['bounds_LTArea'] = os.path.join(bounds_base,'LTBoundswArea.shp')\nconfig2['bounds_LTKML'] = os.path.join(bounds_base,'LTBoundswArea.kml')\n\n#Run Module to do initial check of LAS files.\n#ot.RunQAQC(config2)\n#----------------------------------------------------------------------\n\n\n#Config file for adding CRS to files...\n#----------------------------------------------------------------------\n#module to initialize the config file to all null values.\nconfig3 = ot.initializeNullConfig()\n\nconfig3['log_dir'] = log_dir\nconfig3['ingestLog'] = os.path.join(log_dir,shortname+'_ADDCRS_QAQCLog.txt')\nconfig3['AddCRS2Header']= 1\nconfig3['getFilesWild'] = '.*\\.las$'\nconfig3['getFilesDir'] = '/volumes/OT6TB/CA17_Dietrich/2017_LAS_Tiles'\nconfig3['ftype'] = 'f'\nconfig3['recursive'] = 0\nconfig3['fsuffix'] = '_EPSG6339'\nconfig3['overwrite'] = 0\nconfig3['LAZDir_out'] = '/volumes/OT6TB/CA17_Dietrich/LAZ'\nconfig3['pipeline'] = os.path.join(scripts_dir,'pipeline.json')\nconfig3['LAS2LAZ_method'] = 'pdal'\n\n#Run Module to add CRS to lidar files (LAS or LAZ)\n#ot.RunQAQC(config3)\n#----------------------------------------------------------------------\n\n\n\n#Config file for QA/QC of LAZ and Create Boundaries\n#----------------------------------------------------------------------\n#module to initialize the config file to all null values\nconfig4 = ot.initializeNullConfig()\n\nconfig4['log_dir'] = log_dir\nconfig4['ingestLog'] = os.path.join(log_dir,shortname+'_QAQCLog.txt')\nconfig4['recursive'] = 0\nconfig4['getFilesDir'] = '/volumes/OT6TB/CA17_Dietrich/LAZ'\nconfig4['getFilesWild'] = '.*\\.laz$'\nconfig4['ftype'] = 'f'\nconfig4['CreatePDALInfo'] = 1\nconfig4['PDALInfoFile'] = shortname+'_PDALInfoLog.txt'\nconfig4['ReadPDALLog'] = 1\nconfig4['CheckLAZCount'] = 1\nconfig4['MissingHCRS'] = 1\nconfig4['MissingVCRS'] = 1\nconfig4['HCRS_Uniform'] = 1\nconfig4['VCRS_Uniform'] = 1\nconfig4['VersionCheck'] = 1\nconfig4['PointTypeCheck'] = 1\nconfig4['GlobalEncodingCheck'] = 1\nconfig4['CreatePDALBoundary'] = 1\nconfig4['bounds_PDAL'] = os.path.join(bounds_base,'PDAL.shp')\nconfig4['BufferSize'] = 1\nconfig4['epsg'] = 6339\nconfig4['bounds_PDALmerge'] = os.path.join(bounds_base,'PDALMerged.shp')\nconfig4['bounds_PDALmergeArea'] = os.path.join(bounds_base,'PDALMergedwArea.shp')\nconfig4['bounds_PDALKML'] = os.path.join(bounds_base,'PDALMergedwArea.kml')\nconfig4['winePath'] = '/Applications/LASTools/bin'\nconfig4['CreateLASBoundary'] = 1\nconfig4['bounds_LT'] = os.path.join(bounds_base,'LTBounds.shp')\nconfig4['randFrac'] = 0.25\nconfig4['concavity'] = 100\nconfig4['bounds_LTArea'] = os.path.join(bounds_base,'LTBoundswArea.shp')\nconfig4['bounds_LTKML'] = os.path.join(bounds_base,'LTBoundswArea.kml')\n\n#Run Module to Ingest LAZ, Create Boundaries\n#ot.RunQAQC(config4)\n#----------------------------------------------------------------------\n\n\n\n#Config file for Checking Original Rasters for 
Metadata\n#----------------------------------------------------------------------\n#module to initialize the config file to all null values?\nconfig5 = ot.initializeNullConfig()\n\nconfig5['CheckRasMeta'] = 1\nconfig5['log_dir'] = log_dir\nconfig5['ingestLog'] = os.path.join(log_dir,shortname+'_TEST_QAQCLog.txt')\nconfig5['getFilesDir'] = '/volumes/OT6TB/CA17_Dietrich/2017_ESRI_50cm'\nconfig5['getFilesWild'] = '.*\\.flt$'\nconfig5['ftype'] = 'f'\nconfig5['recursive'] = 1\n\n#Run module to convert rasters to tiffs\n#ot.RunQAQC(config5)\n#----------------------------------------------------------------------\n\n\n#Config file for reprojecting and converting to tiffs.\n#----------------------------------------------------------------------\n##module to initialize the config file to all null values\nconfig6 = ot.initializeNullConfig()\n\nconfig6['log_dir'] = log_dir\nconfig6['ingestLog'] = os.path.join(log_dir,shortname+'_TEST_QAQCLog.txt')\nconfig6['getFilesDir'] = '/volumes/OT6TB/CA17_Dietrich/2017_ESRI_50cm'\nconfig6['getFilesWild'] = '.*\\.flt$'\nconfig6['ftype'] = 'f'\nconfig6['recursive'] = 1\nconfig6['Warp2Tiff'] = 1\nconfig6['ras_xBlock'] = 256\nconfig6['ras_yBlock'] = 256\nconfig6['warp_t_srs'] = '6339'\nconfig6['RasOutDir'] = '/path/to/output/rasters'\n\n#Run module to reproject rasters...\n#ot.RunQAQC(config6)\n#----------------------------------------------------------------------\n\n#Config file for ONLY converting to tiffs.\n#----------------------------------------------------------------------\n##module to initialize the config file to all null values\nconfig6 = ot.initializeNullConfig()\n\nconfig6['log_dir'] = log_dir\nconfig6['ingestLog'] = os.path.join(log_dir,shortname+'_FLT2TIF_QAQCLog.txt')\nconfig6['getFilesDir'] = '/Volumes/New Volume/ToOT_HD35/2018_13_265_Feehan/_Deliverables/Rasters'\nconfig6['getFilesWild'] = '.*\\.flt$'\nconfig6['ftype'] = 'f'\nconfig6['recursive'] = 1\nconfig6['Translate2Tiff'] = 1\nconfig6['ras_xBlock'] = 256\nconfig6['ras_yBlock'] = 256\nconfig6['RasOutDir'] = '/volumes/OT6TB/CA18_Feehan/Rasters'\n\n#Run module to reproject rasters...\n#ot.RunQAQC(config6)\n#----------------------------------------------------------------------\n\n\n#Make sure the proper CRS info is in the header.\n#----------------------------------------------------------------------\n#module to initialize the config file to all null values?\nconfig7 = ot.initializeNullConfig()\nconfig7['SetRasterCRS'] = 1\nconfig7['log_dir'] = log_dir\nconfig7['ingestLog'] = os.path.join(log_dir,shortname+'_TEST_QAQCLog.txt')\nconfig7['getFilesDir'] = '/volumes/OT6TB/CA17_Dietrich/2017_ESRI_50cm'\nconfig7['getFilesWild'] = '.*\\.tif$'\nconfig7['ftype'] = 'f'\nconfig7['recursive'] = 1\nconfig7['a_srs']='6339+5703'\n\n#Run module to re-check the raster metadata\n#ot.RunQAQC(config7)\n#----------------------------------------------------------------------\n\n \n\n\n\n","sub_path":"ingest_template.py","file_name":"ingest_template.py","file_ext":"py","file_size_in_byte":8745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"189618027","text":"import numpy as np\nimport time\nfrom ple import PLE\nfrom ple.games.flappybird import FlappyBird\n\n\nclass FlappyBirdEnv:\n def __init__(self, sleep_time=0.0):\n \"\"\"\n Constructs an environment like the environments in OpenAI Gym's library.\n\n :param sleep_time: float - how long the env should pause between each action. 
This argument is\n        only used when env.display_screen = True so you can actually see what is going on. Otherwise the\n        game rendering is so fast that it is hard to see.\n\n        There is only 1 possible action in Flappy Bird. The getActionSet method retrieves all\n        possible actions. Ordinarily a single action would be represented in a binary way, (0, 1),\n        where 1 is when the action is taken. Here however, PLE returns [119, None], where 119 is the\n        action.\n        \"\"\"\n        self.sleep_time = sleep_time\n        self.game = FlappyBird(pipe_gap=125)\n        self.env = PLE(self.game, fps=30, display_screen=False)\n        self.env.init()\n        self.action_map = self.env.getActionSet()  # [119, None]\n        self.env_name = \"FlappyBird\"\n\n    def get_observation(self):\n        \"\"\"\n        The game state returns a dictionary whose keys describe what each value represents.\n        This method returns the values of this dictionary as a numpy array, which\n        matches the convention from OpenAI's Gym library.\n\n        Game state returns a dict of 8 dimensions or features:\n        player_y, player_vel,\n        next_pipe_dist_to_player, next_pipe_top_y, next_pipe_bottom_y,\n        next_next_pipe_dist_to_player, next_next_pipe_top_y, next_next_pipe_bottom_y\n\n        :return: numpy array of shape (8,) representing dimensions or features of the game state\n        \"\"\"\n        obs = self.env.getGameState()\n        return np.array(list(obs.values()))\n\n    def step(self, action):\n        \"\"\"\n        Take an action and return the next observed state, reward, and done condition.\n\n        :param action: int - List index for self.action_map for the action to take. This is\n        always either 0 or 1 for this particular game.\n\n        :return: next observed state (np array of shape (8,)), reward (float), done condition (bool)\n        \"\"\"\n        action = self.action_map[action]  # retrieves the action from the key:value map\n        time.sleep(self.sleep_time)  # sleeps - useful if display_screen=True so you can actually see what's going on\n        reward = self.env.act(action)  # calculates reward or fitness\n        done = self.env.game_over()  # checks if the game is over (Flappy Bird only ends when you lose)\n        obs = self.get_observation()\n        return obs, reward, done\n\n    def reset(self):\n        \"\"\"\n        Resets the game's state and returns an observation from the reset state\n        \"\"\"\n        self.env.reset_game()\n        return self.get_observation()\n\n    def set_display(self, boolean_value):\n        \"\"\"\n        Changes the display condition, which determines whether or not to display the Pygame\n        in a screen to let you view what is going on.\n\n        :param boolean_value: either True or False\n        \"\"\"\n        self.env.display_screen = boolean_value
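\n\n\n# Minimal usage sketch (added for illustration; not part of the original module).\n# It runs one headless episode with a naive always-flap policy:\nif __name__ == \"__main__\":\n    env = FlappyBirdEnv()\n    obs = env.reset()\n    done = False\n    while not done:\n        obs, reward, done = env.step(0)  # action index 0 == flap\n","sub_path":"environments/flappy_bird_env.py","file_name":"flappy_bird_env.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"305135286","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('env', 'CUDA_VISIBLE_DEVICES = 1')\nimport trainer\nimport pandas as pd\nimport numpy as np\nimport util\nimport my_models\nfrom util import *\nfrom my_models import *\n\n\n# In[2]:\n\n\n# choose your model to predict\nmodel_list = [wavenet,waveadded,tcn]\nmodel_ob = model_list[0]()\nm = model_ob.build_model(68,18)\n\n\n# In[3]:\n\n\nENC_COL = ['week_from_cny',\n           'off_on_ratio','week_of_year',\n           'holiday_effect_noncny',\n           'is_cny']\nSR_FEATURE = [ 'sr','same_product_event_sr',\n           'same_subcategory_event_sr',\n           'same_category_event_sr',]\nFEATURE = SR_FEATURE+ENC_COL\nDATA = 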
trainer.Trainer('../../TEST/synthetic_rnn/data_level4.csv','../../TEST/synthetic_rnn/dim_week.csv',\n              '../../TEST/synthetic_rnn/dim_product_lc.csv')\ndf_sr4 = DATA.read_data()\ndf_all = DATA.preprocess(df_sr4)\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler\nenc = OneHotEncoder()\nf_mtx = df_sr4[FEATURE].iloc[:,1:]\nf_in = np.array(f_mtx)[:,(len(SR_FEATURE)-1):]\nt_enc = enc.fit(f_in)\n\nENC_LENGTH = 18*4\nPRED_LENGTH = 18\n#fea_num = len(FEATURE_COL)\nBATCH_SIZE = 128\nwhole_ts = PRED_LENGTH+ENC_LENGTH\nTRAIN,TEST,VAL = DATA.split_train_test(df_all,ENC_LENGTH,PRED_LENGTH)\nTEST_FEATURE_DATA,TEST_SR_DATA = get_ts_info(TEST,whole_ts,FEATURE)\nFEATURE_LENGTH = 68\n\n\n# In[4]:\n\n\nsteps_per_epoch = 500\n\n\n# In[5]:\n\n\ngenerator = data_gen()\ntest_gen = generator.data_generator(TEST_FEATURE_DATA[-1000:],TEST_SR_DATA[-1000:],batch_size=1000,\n                              feature_num = FEATURE_LENGTH,\n                              steps_per_epoch=steps_per_epoch,\n                              input_sequence_length=ENC_LENGTH,\n                              sr_num = len(SR_FEATURE),\n                              target_sequence_length=PRED_LENGTH,\n                              seed=30)\n\n\n# In[41]:\n\n\nfrom keras import models\nbatch_size = 128\n\nclass predict(object):\n    def __init__(self,filepath='../base02.h5',test_generator=test_gen,batch_size=128):\n        self.filepath = filepath\n        self.test_generator = test_generator\n        self.batch_size = batch_size\n    \n    def predict_sequence(self,my_model,datagen,sample_size,pred_steps=18):\n        pred_seq = np.zeros((sample_size,pred_steps,1))\n        iterator = iter(datagen)\n        input_sequence = next(iterator)\n        target = input_sequence[1]\n        x = input_sequence[0]\n        for j in range(pred_steps):\n            last_pred_step = my_model.predict(x)[:,-1,0]\n            #print(last_pred_step.shape)\n            pred_seq[:,j,0] = last_pred_step\n            #print(input_sequence[0].shape)\n            last_step = np.zeros((sample_size,1,FEATURE_LENGTH))\n            last_step[:,0,:] = input_sequence[0][:,ENC_LENGTH+j,:]\n            last_step[:,0,0] = last_pred_step\n            input_ts = np.concatenate([input_sequence[0],last_step],axis=1)\n            #print(input_ts.shape)\n            x = input_ts\n        return pred_seq,target\n    \n    def get_results(self):\n        model_used = models.load_model(self.filepath)\n        generator = data_gen()\n        test_gen = generator.data_generator(TEST_FEATURE_DATA[-1000:],TEST_SR_DATA[-1000:],batch_size=1,\n                              feature_num = FEATURE_LENGTH,\n                              steps_per_epoch=steps_per_epoch,\n                              input_sequence_length=ENC_LENGTH,\n                              sr_num=4,\n                              target_sequence_length=PRED_LENGTH,\n                              seed=30)\n        predict,target = self.predict_sequence(model_used,test_gen,sample_size=1)\n        return predict,target\n    \n    def predict_and_plot(self,feature_list, sr_list, sample_ind, my_model,sr_num,enc_tail_len=72,pred_steps=18):\n        input_series = self.get_batch_input(feature_list, sr_list, sample_ind, enc_tail_len, pred_steps,sr_num)\n        pred_series,target = self.get_results()\n        # print(pred_series)\n\n        #input_series = input_series.reshape(-1,1)\n        pred_series = pred_series.reshape(-1,1) \n        mean = np.mean(np.log1p(sr_list[sample_ind]))\n        sr = np.log1p(sr_list[sample_ind])/mean\n        target_series = sr[enc_tail_len:].reshape(-1,1)\n        '''\n        mean = np.mean(np.log1p(sr_list[sample_ind]))\n        pred = np.exp(mean*pred_series)\n        '''\n        encode_series_tail = sr[:72].reshape(-1,1)\n        # print(encode_series_tail.shape)\n        x_encode = enc_tail_len\n\n        plt.figure(figsize=(10,6)) \n\n        plt.plot(range(1,x_encode+1),encode_series_tail)\n        plt.plot(range(x_encode,x_encode+pred_steps),target_series,color='orange')\n        plt.plot(range(x_encode,x_encode+pred_steps),pred_series,color='teal',linestyle='--')\n\n        plt.title('Encoder Series Tail of Length %d, Target Series, and Predictions' % 
enc_tail_len)\n        plt.legend(['Encoding Series','Target Series','Predictions'])\n    \n    def get_batch_input(self,feature_list, sr_list, sample_ind, input_seq_length, pred_length,sr_num):\n        s = np.zeros((1,whole_ts,FEATURE_LENGTH))\n        x,feature,y,ts_mean_list = get_batch_matrix(feature_list,sr_list,sample_ind,ENC_LENGTH,PRED_LENGTH,sr_num)\n        for i in range(whole_ts):\n            s[0,i,:] = feature[i]\n        s[0,:,0] = x\n        enc_input = s[:,:ENC_LENGTH,:]\n        dec_output = np.expand_dims(s[:,ENC_LENGTH:,0],axis=2)\n        dec_input = get_teaching_force(FEATURE_LENGTH,enc_input,s[:,ENC_LENGTH:,:])\n        in_data = np.concatenate([enc_input, dec_input], axis=1)\n        return in_data\n\n\n# In[42]:\n\n\np = predict()\n\n\n# In[44]:\n\n\np.predict_and_plot(TEST_FEATURE_DATA, TEST_SR_DATA,120,m,4, enc_tail_len=72,pred_steps=18)\n\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"243798009","text":"\nclass ConversionNotPossible(ValueError):\n    pass\n\n\ndef convert(fromUnit, toUnit, value):\n    \"\"\"Convert value from fromUnit to toUnit. Unit names are strings (case-insensitive); value is a float. Raises ConversionNotPossible for unsupported pairs.\"\"\"\n\n    fromUnit, toUnit = fromUnit.lower(), toUnit.lower() #make sure strings are lower case\n\n    converted_val = 0.0\n    \n    # important values for conversion equations\n    c_k = 273.15\n    c_f1, c_f2 = 9/5, 32 \n    f_k1, f_k2 = 459.67, 5/9\n    f_c1, f_c2 = 32, 5/9\n    k_f1, k_f2 = 9/5, 459.67\n    k_c = 273.15\n    m_y = 1760.0\n    m_m = 1609.344\n    y_m = 1.094\n\n    # store formulas; the 'farenheit' spelling is kept as-is to match existing callers\n    the_math = {\n        ('celsius', 'kelvin') : lambda a : a + c_k,\n        ('celsius', 'farenheit') : lambda a : a * c_f1 + c_f2,\n        ('farenheit', 'kelvin') : lambda a : (a + f_k1) * f_k2,\n        ('farenheit', 'celsius') : lambda a : (a - f_c1) * f_c2,\n        ('kelvin', 'farenheit') : lambda a : a * k_f1 - k_f2,\n        ('kelvin', 'celsius') : lambda a : a - k_c,\n        ('miles', 'yards') : lambda a : a * m_y,\n        ('miles', 'meters') : lambda a : a * m_m,\n        ('yards', 'miles') : lambda a : a / m_y,\n        ('yards', 'meters') : lambda a : a / y_m,\n        ('meters', 'miles') : lambda a : a / m_m,\n        ('meters', 'yards') : lambda a : a * y_m,\n    }\n\n    if fromUnit == toUnit:\n        converted_val = value  # same units: return the value unchanged\n        return converted_val\n\n    else:\n        try:\n            converted_val = round(the_math[fromUnit, toUnit](value), 2)\n            return converted_val\n        except (KeyError, TypeError):\n            raise ConversionNotPossible(f\"Can't convert {fromUnit} to {toUnit}\")
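\n\n# Illustrative usage (added for clarity):\n#\n#     convert('celsius', 'kelvin', 25.0)   # -> 298.15\n#     convert('miles', 'meters', 2.0)      # -> 3218.69\n#     convert('miles', 'celsius', 1.0)     # raises ConversionNotPossible\n\n","sub_path":"conversions_refactored.py","file_name":"conversions_refactored.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"165508487","text":"def area(radius):\n    return 3.14 * radius * radius\n\ndef vol(area, length):\n    print(area * length)\n\nradius = input('enter a radius: ')\nlength = input('enter a length: ')\n\nradius = int(radius)\nlength = int(length)\n\narea_calc = area(radius)\nvol(area_calc, length)\n","sub_path":"lessons/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"299040553","text":"import requests\nfrom requests.exceptions import RequestException\n\n\nfrom PyQt5 import QtWidgets as qtw\nfrom PyQt5 import QtCore as qtc\nfrom PyQt5 import QtGui as qtg\nfrom PyQt5 import QtMultimedia as qtm\nfrom PyQt5 import uic\n\nimport utils\n\n\nclass MainWidget(qtw.QWidget):\n    def __init__(self, 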
*args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setupUi()\n\n def setupUi(self):\n self.setObjectName('Form')\n layout = qtw.QGridLayout(self)\n\n requestWidget = qtw.QWidget()\n self.requestButton = qtw.QPushButton('POST')\n\n contentWidget = qtw.QWidget()\n contentLayout = qtw.QGridLayout(contentWidget)\n self.contentTypeCombo = qtw.QComboBox()\n self.contentTypeCombo.addItem('application/json')\n self.contentTypeCombo.addItem('application/x-www-form-urlencoded')\n self.contentTypeCombo.addItem('multipart/form-data')\n self.contentEdit = qtw.QPlainTextEdit()\n contentLayout.addWidget(self.contentTypeCombo)\n contentLayout.addWidget(self.contentEdit)\n\n headersWidget = qtw.QWidget()\n headersLayout = qtw.QGridLayout(headersWidget)\n self.headersEdit = qtw.QPlainTextEdit()\n headersLayout.addWidget(self.headersEdit)\n\n cookiesWidget = qtw.QWidget()\n cookiesLayout = qtw.QGridLayout(cookiesWidget)\n self.cookiesEdit = qtw.QPlainTextEdit()\n cookiesLayout.addWidget(self.cookiesEdit)\n\n requestTab = qtw.QTabWidget()\n requestTab.addTab(contentWidget, 'Content')\n requestTab.addTab(headersWidget, 'Headers')\n requestTab.addTab(cookiesWidget, 'Cookies')\n\n requestLayout = qtw.QGridLayout(requestWidget)\n requestLayout.addWidget(qtw.QLabel('Request'), 0, 0)\n requestLayout.addWidget(self.requestButton, 0, 1)\n requestLayout.addWidget(requestTab, 1, 0, 1, 2)\n\n responseWidget = qtw.QWidget()\n self.responseEdit = qtw.QPlainTextEdit()\n\n responseLayout = qtw.QGridLayout(responseWidget)\n responseLayout.addWidget(qtw.QLabel('Response'))\n responseLayout.addWidget(self.responseEdit)\n\n splitter = qtw.QSplitter(qtc.Qt.Vertical)\n splitter.addWidget(requestWidget)\n splitter.addWidget(responseWidget)\n layout.addWidget(splitter)\n\n\nif __name__ == '__main__':\n app = qtw.QApplication([])\n\n w = MainWidget()\n w.show()\n\n app.exec_()\n","sub_path":"app_handcoded.py","file_name":"app_handcoded.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"280044819","text":"#!/usr/bin/env python\n\nimport sys\nimport csv\nimport os\n\n\nif __name__ == \"__main__\":\n # Process command line args: 2 files only \n if len(sys.argv) != 3:\n print(\"usage: {} FileA FileB\".format(sys.argv[0]))\n sys.exit(1)\n\n # Initialize data structures\n dicta = dict()\n dictb = dict()\n common_keys = list()\n extra_a_keys = list()\n extra_b_keys = list()\n outfile_list = list()\n baseline_procs = ['DTS_ExportRadiatorLedger', 'DTS_ExportSML401k', 'DTS_ExportStateTexas', 'Employee_CompanyDocumentList', 'Employee_EnrollHistoryConfirmationList', 'Employee_EnrollPlanLink', 'Employee_PayCycleCurrent', 'Employee_PayCycleHistory', 'EnrollConfig_ClientPlan', 'EnrollConfig_ClientPlanFile', 'EnrollConfig_ClientPlanRate', 'EnrollConfig_EnrollCoverageEventMapCopyTo', 'Export_BatchInsert', 'GetMaximumAllowablePayrollCalculations', 'GetNextPayrollCalculationInQueue', 'HREmployee_SendTestEmail', 'HREmployee_TaxStateOptionList', 'HRTimeCard_EmployeeManagerDrp', 'HRTimeCard_EmployeeTC1TimeCardDelete', 'HRTimeCard_EmployeeTC1TimeOffDelete', 'HRTimeCard_EmployeeTC2TimeCardDelete', 'IsPayrollCalculationQueueEnabled', 'Report_PayrollSummaryByDates'] \n\n # Process files in data structures\n try:\n with open(sys.argv[1], \"r\") as filea:\n for line in csv.DictReader(\n filea, fieldnames=(\"stored_proc\",\"a\"), delimiter=\" \"):\n\n # Filter out baseline procs\n if dict(line)[\"stored_proc\"] not in baseline_procs:\n count = 
int(\"\".join(dict(line)[None]))\n dicta[dict(line)[\"stored_proc\"]] = count\n except FileNotFoundError:\n print(\"ERROR: File: {} does not exist.\".format(sys.argv[1]))\n sys.exit(1)\n\n try:\n with open(sys.argv[2], \"r\") as fileb:\n for line in csv.DictReader(\n fileb, fieldnames=(\"stored_proc\",\"a\"), delimiter=\" \"):\n\n # Filter out baseline procs\n if dict(line)[\"stored_proc\"] not in baseline_procs:\n count = int(\"\".join(dict(line)[None]))\n dictb[dict(line)[\"stored_proc\"]] = count\n except FileNotFoundError:\n print(\"ERROR: File: {} does not exist.\".format(sys.argv[2]))\n sys.exit(1)\n\n # Process data: compare stored proc names and their counts, respectively \n akeys = dicta.keys()\n bkeys = dictb.keys()\n common_keys = list(set(akeys).intersection(bkeys))\n extra_a_keys = list(set(akeys) - set(bkeys))\n extra_b_keys = list(set(bkeys) - set(akeys))\n\n # print(\"common keys: {}\".format(common_keys))\n print(\"extra a keys: {}\".format(extra_a_keys))\n print(\"extra b keys: {}\".format(extra_b_keys))\n\n # Print/store results\n # Delete output file, if exists\n outfile_name = \"get-covered-stored-procs.csv\"\n try:\n os.remove(outfile_name)\n except FileNotFoundError:\n pass\n\n # Handle any extra procs in baseline\n for akey in extra_a_keys:\n outfile_list.append(\"{},{},{},{}\\n\".format(akey, dicta[akey], 0, \"proc_in_baseline_only\"))\n\n # Handle any extra procs in compare file\n for bkey in extra_b_keys:\n outfile_list.append(\"{},{},{},{}\\n\".format(bkey, dictb[bkey], 0, \"proc_in_compare_only\"))\n\n # Handle common keys \n for ckey in common_keys:\n if dicta[ckey] != dictb[ckey]:\n outfile_list.append(\"{},{},{},{}\\n\".format(ckey, dicta[ckey], dictb[ckey], \"proc_in_both\"))\n\n # Write to output file\n # Header: \"stored_proc\", \"baseline_cnt\", \"compare_cnt\", \"tag\"\n outfile = open(outfile_name, \"a\")\n for proc_found in sorted(outfile_list):\n outfile.write(proc_found)\n outfile.close()\n","sub_path":"payroll-db-stored-procs/compute_executed_stored_procs.py","file_name":"compute_executed_stored_procs.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"538215401","text":"\"\"\"empty message\n\nRevision ID: 180579c756b4\nRevises: dc9617df55ef\nCreate Date: 2018-11-30 15:46:38.690574\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '180579c756b4'\ndown_revision = 'dc9617df55ef'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.create_table('events',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('user_id', sa.Integer(), nullable=False),\n    sa.Column('event_name', sa.String(), nullable=True),\n    sa.Column('event_description', sa.String(), nullable=True),\n    sa.Column('event_location', sa.String(), nullable=True),\n    sa.Column('event_start_date', sa.DateTime(), nullable=True),\n    sa.Column('event_end_date', sa.DateTime(), nullable=True),\n    sa.Column('event_status', sa.Boolean(), nullable=True),\n    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_index(op.f('ix_events_event_end_date'), 'events', ['event_end_date'], unique=False)\n    op.create_index(op.f('ix_events_event_start_date'), 'events', ['event_start_date'], unique=False)\n    op.create_table('occupation',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('user_id', sa.Integer(), nullable=False),\n    sa.Column('occupation_name', sa.Text(), nullable=True),\n    sa.Column('occupation_company', sa.Text(), nullable=True),\n    sa.Column('occupation_start_date', sa.DateTime(), nullable=True),\n    sa.Column('occupation_end_date', sa.DateTime(), nullable=True),\n    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_index(op.f('ix_occupation_occupation_end_date'), 'occupation', ['occupation_end_date'], unique=False)\n    op.create_index(op.f('ix_occupation_occupation_start_date'), 'occupation', ['occupation_start_date'], unique=False)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_index(op.f('ix_occupation_occupation_start_date'), table_name='occupation')\n    op.drop_index(op.f('ix_occupation_occupation_end_date'), table_name='occupation')\n    op.drop_table('occupation')\n    op.drop_index(op.f('ix_events_event_start_date'), table_name='events')\n    op.drop_index(op.f('ix_events_event_end_date'), table_name='events')\n    op.drop_table('events')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/180579c756b4_.py","file_name":"180579c756b4_.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"85459491","text":"__author__ = 'kavin'\n\n# encoding: utf-8\nimport json\nimport re\n\ndef predict_weight_match(json_str_1,json_str_2):\n\n    \"\"\"\n    This function will match the weights of two given products using a regular expression.\n    Two product descriptions (in JSON format) are passed as input to the function.\n    The output is a list in the format [return_value, 1],\n    where 1 is the confidence score and return_value is described as follows\n    :param: json_str_1, json_str_2\n    :return: [1,1] if both the products have same weights\n             [0,1] if two products have different weights\n             [-1,1] if one or both of the products have weight values missing in the product description\n\n    \"\"\"\n\n    result_list = []\n    c1 = 0\n    c2 = 0\n    c3 = 0\n    regex = 'Weight[^:;,.]*?:[^:,.]*?\\\\s*-*([0-9]+\\\\.*[0-9]*)\\\\s*((?:g|tons|ton|lbs|lb|pounds|pound|ounce|oz|gram|grams)*(?:/cm2|/m2|/m|/cm|/mm|/mm2)*(?:\\\\²)*)\\\\s*[\\\\.|<>]*'\n    match1 = re.findall(regex,json_str_1,re.I)\n    match1 = list(set(match1))\n    #print match1\n    match2 = re.findall(regex,json_str_2,re.I)\n    match2 = list(set(match2))\n    #print match2\n    if not match1 or not match2:\n        result_list.append(-1)\n        result_list.append(1)\n        #print result_list\n        c3+=1\n    else:\n        count = 0\n        count1 = 0\n        count2 = 0\n        for val in match1:\n            for val1 in match2:\n                if not val[0] or not 
val[1] or not val1[0] or not val1[1]:\n count1+=1\n\n elif float(val[0]) == float(val1[0]) and (str(val[1]).lower() == str(val1[1]).lower() or re.findall('^lb',val[1],re.I)==re.findall('^lb',val1[1],re.I) or re.findall('^pound',val[1],re.I)==re.findall('^pound',val1[1],re.I) or re.findall('^g',val[1],re.I)==re.findall('^g',val1[1],re.I) or re.findall('^o',val[1],re.I)==re.findall('^o',val1[1],re.I)):\n count+=1\n elif (re.findall('^lb',val[1],re.I) or re.findall('^pound',val[1],re.I)) and (re.findall('^pound',val1[1],re.I) or re.findall('^lb',val1[1],re.I)) :\n if float(val[0])==float(val1[0]):\n count+=1\n else:\n count2+=1\n else:\n count2+=1\n\n if count > 0:\n result_list.append(1)\n result_list.append(1)\n #print result_list\n c1+=1\n \n elif count2 > 0:\n result_list.append(0)\n result_list.append(1)\n #print result_list\n c2+=1\n \n else:\n result_list.append(-1)\n result_list.append(1)\n #print result_list\n c3+=1\n \n #print result_list\n return result_list","sub_path":"DML/project/entity_matcher/Temp/weight/weight_module.py","file_name":"weight_module.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"463379100","text":"import collections\nimport math\n\n\ndef Entropy(y):\n if (len(y) == 0):\n return 0.0\n distribution = collections.Counter()\n for yValue in y:\n distribution[yValue] += 1\n numSamples = sum(distribution.values())\n entropy = 0.0\n for value in distribution:\n p = distribution[value] / numSamples\n entropy += -p * math.log2(p)\n return entropy\n\n\ndef SplitOnFeature(x, y, featureIndex):\n # find the split point to use\n min = None\n max = None\n for i in range(len(x)):\n thisValue = x[i][featureIndex]\n if min == None:\n min = thisValue\n max = thisValue\n if thisValue < min:\n min = thisValue\n if thisValue > max:\n max = thisValue\n threshold = (min + max) / 2.0\n # format is [, ]\n # and each of those is [x, y] for the samples that meet the criteria\n sampleSubsets = [[[], []], [[], []]]\n for i in range(len(x)):\n thisValue = x[i][featureIndex]\n if thisValue < threshold:\n subset = sampleSubsets[0]\n else:\n subset = sampleSubsets[1]\n\n subset[0].append(x[i])\n subset[1].append(y[i])\n return (threshold, sampleSubsets)\n\n\ndef FindBestSplit(x, y, featureSet=None):\n if featureSet == None: # no restriction on features\n featureSet = set()\n for i in range(len(x[0])):\n featureSet.add(i)\n totalEntropy = Entropy(y)\n samples = len(y)\n bestIndex = 0\n bestGain = 0\n bestSubsets = {}\n bestThreshold = None\n for i in range(len(x[0])):\n if i in featureSet: # if we can consider this feature, then do.\n (threshold, subsets) = SplitOnFeature(x, y, i)\n entropyAfterSplit = 0\n for subset in subsets:\n xVal = subset[0]\n yVal = subset[1]\n entropyAfterSplit += (len(xVal) / samples) * Entropy(yVal)\n gain = totalEntropy - entropyAfterSplit\n if gain > bestGain:\n bestIndex = i\n bestGain = gain\n bestSubsets = subsets\n bestThreshold = threshold\n if bestGain == 0:\n return None\n return (bestIndex, bestThreshold, bestSubsets)\n\n\nclass TreeNode(object):\n def __init__(self):\n self.labelDistribution = collections.Counter()\n self.splitIndex = None\n self.threshold = None\n self.children = []\n self.x = []\n self.y = []\n\n def AddData(self, x, y):\n self.x += x\n self.y += y\n for newY in y:\n self.labelDistribution[newY] += 1\n\n def GrowTree(self, minToSplit, featureSet=None):\n if len(self.x) < minToSplit:\n return\n splitAnswer = FindBestSplit(self.x, self.y, 
featureSet)\n        if splitAnswer is None:\n            return\n        (self.splitIndex, self.threshold, splitSubsets) = splitAnswer\n        for subset in splitSubsets:\n            childNode = TreeNode()\n            childNode.AddData(subset[0], subset[1])\n            self.children.append(childNode)\n            childNode.GrowTree(minToSplit, featureSet)\n\n    def predict(self, x):\n        if self.splitIndex is not None:\n            if x[self.splitIndex] < self.threshold:\n                return self.children[0].predict(x)\n            else:\n                return self.children[1].predict(x)\n        return self.labelDistribution.most_common(1)[0][0]\n\n    def visualize(self, depth=1):\n        if self.splitIndex is None:\n            print(self.labelDistribution)\n        else:\n            print(\"Split on: %d\" % (self.splitIndex))\n            # less than\n            for i in range(depth):\n                print(' ', end='', flush=True)\n            print(\"< %f -- \" % self.threshold, end='', flush=True)\n            self.children[0].visualize(depth + 1)\n            # greater than or equal\n            for i in range(depth):\n                print(' ', end='', flush=True)\n            print(\">= %f -- \" % self.threshold, end='', flush=True)\n            self.children[1].visualize(depth + 1)\n\n\nclass DecisionTreeModel(object):\n    \"\"\"A model that learns simple decision trees.\"\"\"\n\n    def __init__(self):\n        self.treeNode = TreeNode()\n\n    def fit(self, x, y, featureSet=None, minToSplit=100, logProgress=False):\n        self.treeNode.AddData(x, y)\n        self.treeNode.GrowTree(minToSplit, featureSet)\n\n    def predict(self, x):\n        y = []\n        for example in x:\n            y.append(self.treeNode.predict(example))\n        return y\n\n    def visualize(self):\n        self.treeNode.visualize()
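\n\n\n# Minimal usage sketch (added for illustration; the toy data is hypothetical):\nif __name__ == '__main__':\n    xs = [[0.1], [0.2], [0.3], [0.7], [0.8], [0.9]]\n    ys = [0, 0, 0, 1, 1, 1]\n    model = DecisionTreeModel()\n    model.fit(xs, ys, minToSplit=2)\n    print(model.predict([[0.25], [0.75]]))  # expected: [0, 1]\n    model.visualize()\n","sub_path":"Assignment9/Code/model/GeoffDecisionTreeModel.py","file_name":"GeoffDecisionTreeModel.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"596306769","text":"# MIT License\n#\n# Copyright (c) 2018 Haoxintong\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 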
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"Reorganize cifar-10 data\"\"\"\n\nimport os\nimport cv2\nimport pickle\nimport argparse\nimport numpy as np\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Reorganize data.',\n                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument('--data-root', required=True, type=str,\n                        help='root dir of data')\n    parser.add_argument('--output-root', required=True, type=str,\n                        help='output dir of the images')\n    args = parser.parse_args()\n    return args\n\n\nfiles = [\"data_batch_1\", \"data_batch_2\", \"data_batch_3\",\n         \"data_batch_4\", \"data_batch_5\", \"test_batch\"]\n\n\ndef load_pickle_data(file_name):\n    with open(file_name, \"rb\") as f:\n        data = pickle.load(f, encoding='bytes')\n    return data\n\n\ndef mkdir_if_not_exist(path):\n    if not os.path.exists(os.path.join(*path)):\n        os.makedirs(os.path.join(*path))\n\n\ndef reorg_data(data_root, output_root):\n    for file in files:\n        im_root = os.path.join(output_root, \"train\" if not file.startswith(\"test\") else \"test\")\n        data = load_pickle_data(os.path.join(data_root, file))\n        for i in range(len(data[b'labels'])):\n            img = data[b'data'][i]\n            label = data[b'labels'][i]\n            filename = data[b'filenames'][i].decode()\n            mkdir_if_not_exist([im_root, str(label)])\n            img = np.reshape(img, [3, 32, 32])\n            img = img.transpose((1, 2, 0))\n            img = img[..., ::-1]  # convert RGB to BGR for OpenCV's imwrite\n            cv2.imwrite(os.path.join(im_root, str(label), filename), img)\n\n\nif __name__ == '__main__':\n    pargs = parse_args()\n    reorg_data(pargs.data_root, pargs.output_root)\n","sub_path":"utils/reorg_cifar10_data.py","file_name":"reorg_cifar10_data.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"441022227","text":"############## CONFIGURATION FILE ###############\n### Audio configuration will be effective ### \n### after running a script twice ###\n#################################################\n\n### Sampling rate ###\nsr = 44100\n### Control rate ###\nkr = 4410\n### Software buffer ###\nsoftBuffer = 256\n### Hardware buffer ###\nhardBuffer = 4096\n### Audio format ('wav' or 'aif') ###\naudioFormat = 'wav'\n### Audio driver ('portaudio' or 'winmme')\ndriver = 'portaudio'\n### Sample format ('' = 16 bits, '-3' = 24 bits, '-f' = 32 bits float)\nsampleFormat = ''\n### Audio input number (leave it blank to use Csound default or see Csound log file) ###\naudioInputNumber = ''\n### Audio output number (leave it blank to use Csound default or see Csound log file) ###\naudioOutputNumber = ''\n\n### Preferred audio editor (will be effective after restarting Ounk) ###\naudioEditor = 'C:\\\\\"Program Files\"\\Audacity\\\\audacity'\n\n### GUI style ###\npreferedStyle = 'Default'","sub_path":"settings/config_win32.py","file_name":"config_win32.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"102926541","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport argparse\nimport atexit\nimport os\nfrom kafka import KafkaConsumer, TopicPartition\nimport sys\nimport traceback\nimport time\nimport threading\nfrom pprint import pprint\n\nverbose = False\nsimulated = 
False\n\ntrace_records = {}\n\ndef list_topics(client, name):\n if not name:\n for item in sorted(client.topics()):\n print(item)\n else:\n partitions = client.partitions_for_topic(name)\n tps = [TopicPartition(name, p) for p in partitions]\n tpts = client.end_offsets(tps)\n print(tpts)\n print ('Name: ', name)\n print ('Partition Count: ', len(partitions))\n \ndef read_topic(consumer,\n key_filter, \n message_filter, \n print_key, \n print_meta, \n print_ts, \n suppress, \n date_filter, \n rule,\n total=None):\n rules = {'all':all, 'any':any}\n if verbose:\n print(\"Launching loop to read the messages.\")\n counter = 0\n for message in consumer:\n counter += 1\n if counter % 1000000 == 0:\n print (\"Read {0} messages.\".format(counter).rjust(200), end='\\r', file=sys.stderr)\n if total and counter > total:\n return\n if key_filter:\n if not message.key:\n continue\n if not rules[rule](x in message.key for x in key_filter):\n continue\n if message_filter:\n if not message.value:\n continue\n if not rules[rule](x in message.value for x in message_filter):\n continue\n if date_filter:\n dateStr = time.strftime(\"%Y-%m-%d\", time.gmtime(message.timestamp/1000))\n if dateStr not in date_filter:\n continue\n if print_meta:\n print ('{p}+{o}+'.format(p=message.partition, o=message.offset), end=\"\")\n if print_key:\n print ('{}+'.format(message.key), end=\"\")\n if print_ts:\n print ('{}+'.format(time.strftime(\"%Y-%m-%d %H:%M:%SZ\", time.gmtime(message.timestamp/1000))), end=\"\")\n if message.key:\n count_tup = trace_records.get(message.key, (0,0))\n adds = count_tup[0]\n dels = count_tup[1]\n if message.value:\n adds = adds + 1\n else:\n dels = dels + 1\n trace_records[message.key] = (adds,dels)\n \ndef wait_for_threads():\n seconds = 0\n while threading.active_count() != 0:\n print ('Waiting for threads to exit.')\n time.sleep(1)\n seconds += 1\n if seconds == 10:\n sys.exit(0)\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Add/remove containers to server.\")\n \n parser.add_argument(\"-v\", \"--verbose\", help=\"Verbose output.\", action=\"store_true\")\n parser.add_argument(\"-s\", \"--simulated\", help=\"Debugging only - no changes will be made to cluster.\", action=\"store_true\")\n parser.add_argument(\"-b\", \"--broker\", help=\"Kafka bootstrap broker\", metavar=\"hostname\", required=True)\n parser.add_argument(\"-p\", \"--port\", help=\"Kafka bootstrap broker port\", metavar=\"port\", type=int, default=9092)\n parser.add_argument(\"-t\", \"--topic\", help=\"Topic name\", metavar=\"NAME\")\n parser.add_argument(\"-k\", \"--key-filter\", help=\"Filter term for key\", metavar=\"TERM\", action=\"append\")\n parser.add_argument(\"-m\", \"--message-filter\", help=\"Filter term for message\", metavar=\"TERM\", action=\"append\")\n parser.add_argument(\"-L\", \"--list\", help=\"List topic(s)\", action=\"store_true\")\n parser.add_argument(\"-o\", \"--offset\", help=\"Offset to read from\", choices=['beginning', 'end'], default='end')\n parser.add_argument(\"-O\", \"--time-offset\", help=\"How back in time to read from\", choices=['1d', '2d', '4d', '1w', '2w', '1m'], default='1w')\n parser.add_argument(\"-M\", \"--Metadata\", help=\"Include metadata about the message\", action=\"store_true\")\n parser.add_argument(\"-K\", \"--Key\", help=\"Include message key\", action=\"store_true\")\n parser.add_argument(\"-e\", \"--exit-at-end\", help=\"Quit when no new messages read in 5 seconds.\", action=\"store_true\")\n parser.add_argument(\"-r\", \"--rule\", help=\"Match all or 
any\", choices=['all', 'any'], default='all')\n parser.add_argument(\"-T\", \"--Timestamp\", help=\"Print the message timestamp\", action=\"store_true\")\n parser.add_argument(\"-D\", \"--Date-filter\", help=\"Filter message based on time\", metavar=\"YYYY-MM-DD\", action=\"append\")\n parser.add_argument(\"-S\", \"--Suppress\", help=\"Print only metadata\", action=\"store_true\")\n \n args = parser.parse_args()\n \n global verbose\n global simulated\n \n verbose = args.verbose\n simulated = args.simulated\n \n if verbose:\n print (args)\n \n bootstrap = ['{0}:{1}'.format(args.broker, args.port)]\n \n try:\n client = KafkaConsumer(args.topic,\n enable_auto_commit=False,\n auto_offset_reset='earliest',\n bootstrap_servers=bootstrap)\n if args.list:\n list_topics(client, args.topic)\n sys.exit(0)\n \n if args.topic:\n partitions = client.partitions_for_topic(args.topic)\n tps = [TopicPartition(args.topic, p) for p in partitions]\n \n timeout_ms = 30000 if args.exit_at_end else float('inf')\n if args.offset == 'end':\n client = KafkaConsumer(\n enable_auto_commit=False,\n auto_offset_reset='latest',\n bootstrap_servers=bootstrap,\n consumer_timeout_ms=timeout_ms)\n else:\n client = KafkaConsumer(\n enable_auto_commit=False,\n auto_offset_reset='earliest',\n bootstrap_servers=bootstrap,\n consumer_timeout_ms=timeout_ms)\n \n client.assign(tps)\n client.seek_to_beginning()\n \n if args.time_offset:\n off = args.time_offset\n offtime_ms = 24 * 3600 * 1000\n if off == '2d':\n offtime_ms *= 2\n if off == '4d':\n offtime_ms *= 4\n if off == '1w':\n offtime_ms *= 7\n if off == '2w':\n offtime_ms *= 14\n if off == '1m':\n offtime_ms *= 30\n \n print (offtime_ms)\n \n currtime_ms = int(time.time() * 1000)\n timestamps = {}\n for tp in tps:\n timestamps[tp] = currtime_ms - offtime_ms\n tpts = client.offsets_for_times(timestamps)\n end_tpts = client.end_offsets(tps)\n total = 0\n for tp in tpts:\n total += end_tpts[tp] - tpts[tp].offset\n client.seek(tp, tpts[tp].offset)\n \n print('Total: ', total)\n \n read_topic(client,\n args.key_filter, \n args.message_filter, \n args.Key, \n args.Metadata, \n args.Timestamp, \n args.Suppress,\n args.Date_filter, \n args.rule,\n total)\n tot_dels = 0\n tot_adds = 0\n morethanonedels = 0\n for key in trace_records.keys():\n count_tup = trace_records[key]\n adds = count_tup[0]\n dels = count_tup[1]\n tot_adds += adds\n tot_dels += dels\n if dels > 1:\n morethanonedels += 1\n if count_tup[0] < count_tup[1]:\n print(\"{0} - adds:{1}, dels:{2}\".format(key, count_tup[0], count_tup[1]))\n \n print(\"Total creates: {0} deletes: {1}\".format(tot_adds, tot_dels))\n print(\"Num records with multiple deletes: {0}\".format(morethanonedels))\n \n except KeyboardInterrupt as e:\n print (\"Stopped\")\n client.close()\n\nif __name__ == \"__main__\":\n atexit.register(wait_for_threads)\n main()\n","sub_path":"match_deletes_log_tailer.py","file_name":"match_deletes_log_tailer.py","file_ext":"py","file_size_in_byte":8467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"130217157","text":"import csv, ast, torch, torch.nn as nn, numpy as np\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset\n\n\n# input: relative dataset filepath\n# output: raw train, raw validation, vocabulary set\ndef load_data_set(name, vocabulary_mode=1):\n # vocabulary mode:\n # 1 : train only\n # 2 : train and validation\n raw_val = []\n raw_train = []\n raw_test = []\n vocabulary = []\n # default is VUA\n # 
path for training and validation sets\n train_path = \"../datasets/VUAsequence/VUA_seq_formatted_train.csv\"\n validation_path = \"../datasets/VUAsequence/VUA_seq_formatted_val.csv\"\n test_path = \"../datasets/VUAsequence/VUA_seq_formatted_test.csv\"\n # column index for sentence,label in csv\n sentence_index = 2\n label_index = 3\n\n if name == \"vua\":\n train_path = \"../datasets/VUAsequence/VUA_seq_formatted_train.csv\"\n validation_path = \"../datasets/VUAsequence/VUA_seq_formatted_val.csv\"\n test_path = \"../datasets/VUAsequence/VUA_seq_formatted_test.csv\"\n\n sentence_index = 2\n label_index = 3\n\n with open(train_path, encoding='latin-1') as f:\n lines = csv.reader(f)\n next(lines)\n for line in lines:\n label_seq = ast.literal_eval(line[label_index])\n raw_train.append([line[sentence_index], label_seq])\n vocabulary.extend(line[sentence_index].split())\n\n with open(validation_path, encoding='latin-1') as f:\n lines = csv.reader(f)\n next(lines)\n for line in lines:\n label_seq = ast.literal_eval(line[label_index])\n raw_val.append([line[sentence_index], label_seq])\n if vocabulary_mode == 2:\n vocabulary.extend(line[sentence_index].split())\n\n with open(test_path, encoding='latin-1') as f:\n lines = csv.reader(f)\n next(lines)\n for line in lines:\n label_seq = ast.literal_eval(line[label_index])\n raw_test.append([line[sentence_index], label_seq])\n if vocabulary_mode == 2:\n vocabulary.extend(line[sentence_index].split())\n\n return raw_val, raw_train, raw_test, set(vocabulary)\n\n# input: a bag of words\n# output: word2index and index2word dictionary mapping\ndef generate_w2idx_idx2w(vocabulary):\n word2idx = {\"\": 0, \"\": 1}\n idx2word = {0: \"\", 1: \"\"}\n for word in vocabulary:\n assigned_index = len(word2idx)\n word2idx[word] = assigned_index\n idx2word[assigned_index] = word\n return word2idx, idx2word\n\n#input: word2idx and idx2word mapping\n#ouput: nn.Embedding matrix\ndef get_embedding_matrix(word2idx, idx2word, normalization=False):\n # Load the GloVe vectors into a dictionary, keeping only words in vocab\n # dimension is defined by glove.\n embedding_dim = 300\n glove_path = \"../glove/glove840B300d.txt\"\n glove_vectors = {}\n with open(glove_path) as glove_file:\n for line in tqdm(glove_file, total=sum(1 for line in open(glove_path))):\n split_line = line.rstrip().split()\n word = split_line[0]\n if len(split_line) != (embedding_dim + 1) or word not in word2idx:\n continue\n assert (len(split_line) == embedding_dim + 1)\n vector = np.array([float(x) for x in split_line[1:]], dtype=\"float32\")\n if normalization:\n vector = vector / np.linalg.norm(vector)\n assert len(vector) == embedding_dim\n glove_vectors[word] = vector\n\n print(\"Number of pre-trained word vectors loaded: \", len(glove_vectors))\n\n # Calculate mean and stdev of embeddings\n all_embeddings = np.array(list(glove_vectors.values()))\n embeddings_mean = float(np.mean(all_embeddings))\n embeddings_stdev = float(np.std(all_embeddings))\n print(\"Embeddings mean: \", embeddings_mean)\n print(\"Embeddings stdev: \", embeddings_stdev)\n\n # Randomly initialize an embedding matrix of (vocab_size, embedding_dim) shape\n # with a similar distribution as the pretrained embeddings for words in vocab.\n vocab_size = len(word2idx)\n embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(embeddings_mean, embeddings_stdev)\n # Go through the embedding matrix and replace the random vector with a\n # pretrained one if available. 
Start iteration at 2 since 0, 1 are PAD, UNK\n for i in range(2, vocab_size):\n word = idx2word[i]\n if word in glove_vectors:\n embedding_matrix[i] = torch.FloatTensor(glove_vectors[word])\n if normalization:\n for i in range(vocab_size):\n embedding_matrix[i] = embedding_matrix[i] / float(np.linalg.norm(embedding_matrix[i]))\n embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)\n embeddings.weight = nn.Parameter(embedding_matrix)\n return embeddings\n\ndef embed_sequence(sentence, word2idx, glove_embeddings):\n\n words = sentence.split()\n\n # 1. embed the sequence by glove vector\n # Replace words with tokens, and 1 (UNK index) if words not indexed.\n indexed_sequence = [word2idx.get(x, 1) for x in words]\n # glove_part has shape: (seq_len, glove_dim)\n glove_embedded = glove_embeddings(Variable(torch.LongTensor(indexed_sequence)))\n\n return glove_embedded.data\n\nclass TextDataset(Dataset):\n def __init__(self, embedded_text, labels):\n \"\"\"\n\n :param embedded_text:\n :param pos_seqs: a list of list: each inner list is a sequence of indexed pos tags\n :param labels: a list of list: each inner list is a sequence of 0, 1.\n :param max_sequence_length: an int\n \"\"\"\n if len(embedded_text) != len(labels):\n raise ValueError(\"Differing number of sentences and labels!\")\n # A list of numpy arrays, where each inner numpy arrays is sequence_length * embed_dim\n # embedding for each word is : glove + elmo + suffix\n self.embedded_text = embedded_text\n # a list of list: each inner list is a sequence of 0, 1.\n # where each inner list is the label for the sentence at the corresponding index.\n self.labels = labels\n # Truncate examples that are longer than max_sequence_length.\n # Long sequences are expensive and might blow up GPU memory usage.\n\n def __getitem__(self, idx):\n \"\"\"\n Return the Dataset example at index `idx`.\n\n Returns\n -------\n example_pos_seq:\n a list of indexed pos tag sequence\n example_text: numpy array\n length: int\n The length of the (possibly truncated) example_text.\n example_label_seq: a list of 0 or 1\n The label of the example.\n \"\"\"\n example_text = self.embedded_text[idx]\n example_label_seq = self.labels[idx]\n # Truncate the sequence if necessary\n example_length = example_text.shape[0]\n assert (example_length == len(example_label_seq))\n return example_text, example_length, example_label_seq\n\n def __len__(self):\n \"\"\"\n Return the number of examples in the Dataset.\n \"\"\"\n return len(self.labels)\n\n @staticmethod\n def collate_fn(batch):\n \"\"\"\n Given a list of examples (each from __getitem__),\n combine them to form a single batch by padding.\n\n Returns:\n -------\n batch_padded_example_text: LongTensor\n LongTensor of shape (batch_size, longest_sequence_length) with the\n padded text for each example in the batch.\n length: LongTensor\n LongTensor of shape (batch_size,) with the unpadded length of the example.\n example_label: LongTensor\n LongTensor of shape (batch_size,) with the label of the example.\n \"\"\"\n batch_padded_example_text = []\n batch_lengths = []\n batch_padded_labels = []\n\n # Get the length of the longest sequence in the batch\n max_length = -1\n for text, length, labels in batch:\n if length > max_length:\n max_length = length\n\n # Iterate over each example in the batch\n for text, length, label in batch:\n # Unpack the example (returned from __getitem__)\n # Amount to pad is length of longest example - length of this example.\n amount_to_pad = max_length - length\n # Tensor of shape 
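# A minimal sketch (illustration only, not part of the original file): the
# padding that collate_fn performs, stripped to its core. Each example grows to
# the batch's longest length with zero rows, and each label list is padded with
# 0. The (3, 4) and (5, 4) toy tensors stand in for (seq_len, embed_dim).
import torch

batch = [(torch.ones(3, 4), 3, [1, 0, 1]),
         (torch.ones(5, 4), 5, [0, 0, 1, 1, 0])]
max_len = max(length for _, length, _ in batch)
padded_texts, padded_labels = [], []
for text, length, labels in batch:
    pad = torch.zeros(max_len - length, text.shape[1])   # zero rows appended below the data
    padded_texts.append(torch.cat((text, pad), dim=0))
    padded_labels.append(labels + [0] * (max_len - length))
print(torch.stack(padded_texts).shape)        # torch.Size([2, 5, 4])
print(torch.LongTensor(padded_labels).shape)  # torch.Size([2, 5])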
(amount_to_pad,), converted to LongTensor\n pad_tensor = torch.zeros(amount_to_pad, text.shape[1])\n\n # Append the pad_tensor to the example_text tensor.\n # Shape of padded_example_text: (padded_length, embedding_dim)\n # top part is the original text numpy,\n # and the bottom part is the 0 padded tensors\n\n # text from the batch is a np array, but cat requires the argument to be the same type\n # turn the text into a torch.FloatTenser, which is the same type as pad_tensor\n text = torch.Tensor(text)\n padded_example_text = torch.cat((text, pad_tensor), dim=0)\n\n # pad the labels with zero.\n padded_example_label = label + [0] * amount_to_pad\n\n # Add the padded example to our batch\n batch_padded_example_text.append(padded_example_text)\n batch_lengths.append(length)\n batch_padded_labels.append(padded_example_label)\n\n # Stack the list of LongTensors into a single LongTensor\n return (\n torch.stack(batch_padded_example_text),\n torch.LongTensor(batch_lengths),\n torch.LongTensor(batch_padded_labels))","sub_path":"s2s/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":9580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"576852190","text":"# # Code Version 13: April 22nd, 2021\n\n\n########## DEPENDENCIES FOR PIPELINE ###########\n#Stuff for making directories and finding files\nimport glob, os, sys\nfrom os import listdir\nfrom os.path import isfile, join\nimport fnmatch\nimport time as clock\n#Stuff for doing math and plotting\nimport numpy as np\nimport matplotlib\n#matplotlib.use('Agg') #<--- for cluster only\nimport matplotlib.pyplot as plt\nfrom matplotlib import pylab\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom pylab import *\nimport matplotlib.gridspec as gridspec\nfrom scipy.signal import savgol_filter\n\n#stuff for getting FFI data from MAST\nimport astropy\nimport astroquery\nfrom astroquery.mast import Catalogs\nfrom astroquery.mast import Tesscut\nfrom astropy.coordinates import SkyCoord\nfrom astroquery.gaia import Gaia\nfrom astropy.wcs import WCS\nfrom astropy.io import fits\nimport astropy.units as u\n\n#in case there are WiFi issues, these may help\nfrom urllib.error import HTTPError\nimport requests\n\n#stuff for detecting periodic transit events\nfrom transitleastsquares import catalog_info\nfrom transitleastsquares import period_grid\nfrom transitleastsquares import transitleastsquares\n\n#No one likes to see warnings (Feel free to comment this out if you do!)\n\nimport warnings\n# warnings.filterwarnings(action='once') #useful to see a warning once but that's it\nwarnings.simplefilter(\"ignore\", category=PendingDeprecationWarning)\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n os.environ[\"PYTHONWARNINGS\"] = \"ignore\" # Also affect subprocesses\n\n \n#for N order PLD\nfrom itertools import combinations_with_replacement as CwR\nfrom fbpca import pca\n \n# for detrending\nimport wotan\nfrom wotan import flatten\n\n#for outlier removal\nfrom wotan import slide_clip\n\n#for saving\nimport pandas as pd\n\n#for getting TPFs\nimport lightkurve as lk\n\n# for clearning all that dataframe garbage memory that Python stores\nimport gc\n\n#########################################################\n#########################################################\n########## CUSTOM PIPELINE FUNCTIONS BELOW ##############\n#########################################################\n########################################################\n\n# Useful inverse-variance weighting 
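# A minimal sketch (illustration only, not part of the original pipeline): the
# inverse-variance weighting used by Bin_func, worked on toy numbers. Each
# point is weighted by 1/sigma**2, so the precise first point dominates the bin.
import numpy as np

flux = np.array([1.00, 1.10, 1.20])
err = np.array([0.01, 0.10, 0.10])       # first point is 10x more precise
w = 1.0 / err**2
binflux = np.sum(w * flux) / np.sum(w)   # ~1.003 (a plain mean would give 1.10)
binerr = 1.0 / np.sqrt(np.sum(w))        # ~0.0099, the combined uncertainty
print(binflux, binerr)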
binning function \n# (also used in AstroImageJ, courtesty of Karen Collins)\n\ndef Bin_func(time,flux,error,binsize):\n import math\n import numpy as np\n good = np.where(np.isfinite(time))\n timefit = time[good]\n fluxfit = flux[good]\n errfit = error[good]\n timemax = np.max(timefit)\n timemin = np.min(timefit)\n npoints = len(timefit)\n nbins = int(math.ceil((timemax - timemin)/binsize)) #binsize in days\n bintime = np.full((nbins,), np.nan)\n binflux = np.full((nbins,), np.nan)\n binerr = np.full((nbins,), np.nan)\n for i in range(0,nbins-1):\n tobin = [np.where( (timefit >= (timemin + i*binsize)) & (timefit < (timemin + (i+1)*binsize)) )]\n if tobin[0] != -1:\n # inverse variance weighted means\n binflux[i] = ((fluxfit[tobin]/(errfit[tobin]**2.0)).sum()) / ((1.0/errfit[tobin]**2.0).sum())\n bintime[i] = ((timefit[tobin]/(errfit[tobin]**2.0)).sum()) / ((1.0/errfit[tobin]**2.0).sum())\n binerr[i] = 1.0 / (np.sqrt( (1.0/errfit[tobin]**2.0)).sum() )\n \n good2 = np.where(np.isfinite(bintime))\n bintime = bintime[good2]\n binflux = binflux[good2]\n binerr = binerr[good2]\n \n return bintime, binflux, binerr\n\n\n####Step 0\ndef Make_dirs(path,Sector,cadence):\n import os\n #Step 0: Creating directories to save figures and data\n path=path+'Sector_'+str(Sector)+'/'\n savefigpath1 = path+'FFI_PLD_plots/'\n savelcpath1 = path+'FFI_PLD_LCs/'\n savefigpath2 = path+'TPF_PLD_plots/'\n savelcpath2 = path+'TPF_PLD_LCs/' \n downloadpath = path+'cache/'\n ###\n if cadence=='long':\n savefigpath=savefigpath1\n savelcpath=savelcpath1\n downloadpath=downloadpath\n if os.path.exists(savefigpath1)==True:\n pass\n else: \n os.makedirs(savefigpath1)\n if os.path.exists(savelcpath1)==True:\n pass\n else:\n os.makedirs(savelcpath1) \n if os.path.exists(downloadpath)==True:\n pass\n else: \n os.makedirs(downloadpath) \n if cadence=='short': \n savefigpath=savefigpath2\n savelcpath=savelcpath2\n downloadpath=downloadpath\n if os.path.exists(savefigpath2)==True:\n pass\n else: \n os.makedirs(savefigpath2)\n if os.path.exists(savelcpath2)==True:\n pass\n else:\n os.makedirs(savelcpath2)\n if os.path.exists(downloadpath)==True:\n pass\n else: \n os.makedirs(downloadpath) \n ### \n return path, savefigpath, savelcpath, downloadpath\n\n\n\n####Step 1: Get Images\ndef center_cutout(hdu,cutoutsize,cadence): \n from astropy.io import fits\n x=hdu[1].header['1CRPX4']\n y=hdu[1].header['2CRPX4']\n reference_pixel=[x,y]\n \n \n size=cutoutsize #new cutout\n\n col = int(x)\n row = int(y)\n s = (size/2, size/2)\n\n imshape = np.shape(hdu[1].data['FLUX'][1:]) #use 1st image?\n # Find the image edges\n col_edges = np.asarray([np.nanmax([0, col-s[0]]),\n np.nanmin([col+s[0], imshape[1]])],\n dtype=int)\n row_edges = np.asarray([np.nanmax([0, row-s[1]]),\n np.nanmin([row+s[1], imshape[0]])],\n dtype=int)\n\n primaryhdu = hdu[0].copy()\n\n #now we need coordinates\n from astropy.wcs import WCS\n w = WCS(hdu[2].header)\n X, Y = np.meshgrid(np.arange(imshape[2]), np.arange(imshape[1]))\n pos_corr1_pix = np.copy(hdu[1].data['POS_CORR1'])\n pos_corr2_pix = np.copy(hdu[1].data['POS_CORR2'])\n\n # We zero POS_CORR* when the values are NaN or make no sense (>50px)\n with warnings.catch_warnings(): # Comparing NaNs to numbers is OK here\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n bad = np.any([~np.isfinite(pos_corr1_pix),\n ~np.isfinite(pos_corr2_pix),\n np.abs(pos_corr1_pix - np.nanmedian(pos_corr1_pix)) > 50,\n np.abs(pos_corr2_pix - np.nanmedian(pos_corr2_pix)) > 50], axis=0)\n pos_corr1_pix[bad], pos_corr2_pix[bad] = 0, 
0\n\n # Add in POSCORRs\n X = (np.atleast_3d(X).transpose([2, 0, 1]) +\n np.atleast_3d(pos_corr1_pix).transpose([1, 2, 0]))\n Y = (np.atleast_3d(Y).transpose([2, 0, 1]) +\n np.atleast_3d(pos_corr2_pix).transpose([1, 2, 0]))\n\n # Pass through WCS\n ra, dec = w.wcs_pix2world(X.ravel(), Y.ravel(), 0)\n# ra, dec = w.wcs_pix2world(X.ravel(), Y.ravel(), 1)\n ra = ra.reshape((pos_corr1_pix.shape[0], imshape[1], imshape[2]))\n dec = dec.reshape((pos_corr2_pix.shape[0], imshape[1], imshape[2]))\n quality_mask = hdu[1].data['QUALITY']!=0\n r,d = ra[quality_mask], dec[quality_mask]\n\n hdu[2].header['RA_OBJ'] = np.nanmean(r[row_edges[0]:row_edges[1], col_edges[0]:col_edges[1]])\n hdu[2].header['DEC_OBJ'] = np.nanmean(d[row_edges[0]:row_edges[1], col_edges[0]:col_edges[1]])\n\n\n from copy import deepcopy\n hdus = [primaryhdu]\n\n\n # Copy the header\n primary_hdr = deepcopy(hdu[0].header)\n bintable_hdr = deepcopy(hdu[1].header)\n image_hdr = deepcopy(hdu[2].header)\n\n # hdus = fits.PrimaryHDU(data=hdu[0], header=primary_hdr)\n\n # Trim any columns that have the shape of the image, to be the new shape\n data_columns = []\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for idx, datacol in enumerate(hdu[1].columns):\n # If the column is 3D\n if (len(hdu[1].data[datacol.name].shape) == 3):\n # Make a copy, trim it and change the format\n datacol = deepcopy(datacol)\n datacol.array = datacol.array[:, row_edges[0]:row_edges[1], col_edges[0]:col_edges[1]]\n datacol._dim = '{}'.format(datacol.array.shape[1:]).replace(' ', '')\n datacol._dims = datacol.array.shape[1:]\n datacol._format = fits.column._ColumnFormat('{}{}'.format(np.product(datacol.array.shape[1:]),\n datacol._format[-1]))\n data_columns.append(datacol)\n bintable_hdr['TDIM{}'.format(idx)] = '{}'.format(datacol.array.shape[1:]).replace(' ', '')\n bintable_hdr['TDIM9'] = '{}'.format(datacol.array.shape[1:]).replace(' ', '')\n bintable_hdr['TDIM13'] = '{}'.format((0, datacol.array.shape[1])).replace(' ', '')\n else:\n data_columns.append(datacol)\n\n # Get those coordinates sorted for the corner of the TPF and the WCS\n bintable_hdr['1CRV*P'] = bintable_hdr['1CRV4P'] + col_edges[0]\n bintable_hdr['2CRV*P'] = bintable_hdr['2CRV4P'] + row_edges[0]\n bintable_hdr['1CRPX*'] = bintable_hdr['1CRPX4'] - col_edges[0]\n bintable_hdr['2CRPX*'] = bintable_hdr['2CRPX4'] - row_edges[0]\n\n\n # Make a table for the data\n data_columns[-1]._dim = '{}'.format((0, int(data_columns[5]._dim.split(',')[1][:-1]))).replace(' ', '')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n BinTablehdu = fits.BinTableHDU.from_columns(data_columns, header=bintable_hdr)\n\n # Append it to the hdulist\n hdus.append(BinTablehdu)\n\n # Correct the aperture mask\n Imagehdu = hdu[2].copy()\n ar = Imagehdu.data\n ar = ar[row_edges[0]:row_edges[1], col_edges[0]:col_edges[1]]\n Imagehdu.header['NAXIS1'] = ar.shape[0]\n Imagehdu.header['NAXIS2'] = ar.shape[1]\n Imagehdu.data = ar\n hdus.append(Imagehdu)\n\n\n \n newfits = fits.HDUList(hdus)\n \n return newfits\n\ndef gethdu(ID,Sector,cutoutsize,cadence,minimum_photon_counts,verbose,downloadpath):\n #from NEMESIS_pipeline import center_cutout\n import lightkurve\n from lightkurve.search import _search_products \n import os\n import requests\n import time as clock \n starName=\"TIC \"+str(ID)\n degrees = 21/3600 #21 arcsec to degrees\n if cadence=='long':\n ffi_or_tpf='FFI'\n if cadence=='short':\n ffi_or_tpf='Target Pixel' \n try:\n notworking=False\n start=clock.time()\n 
search_string=_search_products(starName, radius=degrees, \\\n filetype=ffi_or_tpf, cadence=cadence, \\\n mission=('TESS'), sector=Sector)\n while True:\n try:\n data = search_string.download(cutout_size=(cutoutsize+1,cutoutsize+1),\\\n quality_bitmask='hardest',download_dir=downloadpath)\n notworking=1\n if notworking==True:\n break\n except requests.exceptions.HTTPError as E: \n print('')\n print(E)\n print('waiting 2 seconds then trying again to get through MAST')\n clock.sleep(2) \n print(' ') \n end=clock.time()\n runtime=end-start\n if runtime > 60:\n print('FFI download time: '+str(runtime/60)+' minutes')\n if runtime < 60:\n print('FFI download time: '+str(runtime)+' seconds')\n# lightkurve has its own cutout function but it seems iffy too...\n #force sizes to be even (sometimes it isn't...)\n x=data.hdu[1].header['1CRPX4']#-1\n y=data.hdu[1].header['2CRPX4']#-1\n reference_pixel=[x,y]\n try:\n data2=data.cutout(center=(reference_pixel[0],reference_pixel[1]),size=(cutoutsize,cutoutsize))\n hdu=data2.hdu\n except ValueError: #in case there are NaNs produced in centered cutout\n hdu=data.hdu\n\n # os.system(\"rm -r \" + downloadpath) #delete cache path\n\n# hdu=data.hdu \n\n # NOT SURE THIS WORKS PROPERLY, LEAVE THIS OFF FOR NOW\t\n # NOT SURE THIS WORKS PROPERLY, LEAVE THIS OFF FOR NOW\t\n #hdu = center_cutout(hdu,cutoutsize=10,cadence=cadence)\n # NOT SURE THIS WORKS PROPERLY, LEAVE THIS OFF FOR NOW\t\n # NOT SURE THIS WORKS PROPERLY, LEAVE THIS OFF FOR NOW\t\n \n ### quick quality check for a minimum amount of desired max brightness\n if len(hdu[1].data['FLUX'])<1: \n print('Weird image (maybe near edge of detector?)')\n return None,None,None,None,None\n elif (len(hdu[1].data['FLUX'])>1) & (np.nanmedian(hdu[1].data['FLUX']) < minimum_photon_counts):\n print('Images have median brightness less than '+str(minimum_photon_counts)+'!')\n return None,None,None,None,None \n else:\n ###\n CCD=hdu[0].header['CCD']\n Camera=hdu[0].header['Camera']\n wcs = WCS(hdu[2].header)\n quality_mask = hdu[1].data['QUALITY']!=0\n\n # getting pixel coordinates\n # x=hdu[2].header['CRPIX1']\n # y=hdu[2].header['CRPIX2'] \n # these are more accurate?\n if cadence=='short':\n x=hdu[1].header['1CRPX4']-1\n y=hdu[1].header['2CRPX4']-1\n if cadence=='long':\n x=hdu[1].header['1CRPX4']-1\n y=hdu[1].header['2CRPX4']-1\n reference_pixel=[x,y]\n\n return hdu,CCD,Camera,quality_mask,reference_pixel\n except (AttributeError, NameError, \\\n UnboundLocalError,lightkurve.search.SearchError) as E: \n print(E)\n print('NO HDU IN FFI/TPF FOR TIC '+str(ID)+' IN SECTOR '+ str(Sector))\n return None,None,None,None,None\n\n \n \ndef check_centroids(ID,Sector,cutoutsize,cadence,reference_pixel,savelcpath):\n #from NEMESIS_pipeline import readNDarr\n #from NEMESIS_pipeline import gethdu, Make_dirs,centroid_quadratic\n import pandas as pd \n verbose=False\n\n pix_mask = readNDarr(savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_pix_mask\")\n images = readNDarr(savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_image_fluxes\") \n time = pd.read_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_RAW_LC_systematics_removed.txt\")['Time']\n \n centxs=[]\n centys=[]\n for x in range(len(images)):\n im=images[x]\n centxx,centyy = centroid_quadratic(im, pix_mask,reference_pixel)\n centxs=np.append(centxs,centxx)\n centys=np.append(centys,centyy)\n \n return centxs,centys\n\n\n\n \n####Step 2 helper function \ndef thresholdmask(hdu,reference_pixel,threshold,use_centroid=False): \n import numpy as np\n ### threshold 
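# A minimal sketch (illustration only, not part of the original pipeline): the
# selection rule thresholdmask implements, in isolation. Pixels are kept when
# brighter than median + threshold * 1.4826 * MAD, where 1.4826 rescales the
# median absolute deviation to a Gaussian sigma. The 5x5 image and the
# threshold of 3 are made up for the demo.
import numpy as np
from astropy.stats.funcs import median_absolute_deviation as MAD

img = np.random.default_rng(0).normal(100.0, 5.0, (5, 5))
img[2, 2] = 500.0                                   # inject one bright "star" pixel
cut = np.nanmedian(img) + 3 * 1.4826 * MAD(img.ravel())
mask = np.nan_to_num(img) > cut
print(int(mask.sum()), "pixel(s) above the cut")    # the injected pixel survives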
higher is more conservative, lower is more liberal\n ### define image from image data\n median_image = np.nanmedian(hdu[1].data['FLUX'], axis=0)\n vals=median_image[np.isfinite(median_image)].flatten()\n ###\n from astropy.stats.funcs import median_absolute_deviation as MAD\n ###\n # A value for the number of sigma by which a pixel needs to be\n # brighter than the median flux to be included in the aperture:\n ###\n MADcut = ( 1.4826*MAD(vals)*threshold +np.nanmedian(median_image))\n threshold_mask = np.nan_to_num(median_image) > MADcut\n \n\n if (reference_pixel == None):\n # return all regions above threshold\n return threshold_mask\n \n \n ###\n from scipy.ndimage import label #<--converts image to 1s and 0s as \"labels\"\n labels = label(threshold_mask)[0]\n label_args = np.argwhere(labels > 0)\n ### \n ### For all pixels above threshold, compute distance to reference pixel:\n distances = [np.hypot(crd[0], crd[1]) for crd in label_args - np.array([reference_pixel[1], reference_pixel[0]])]\n ###\n ### Which label corresponds to the closest pixel?\n closest_arg = label_args[np.argmin(distances)]\n closest_label = labels[closest_arg[0], closest_arg[1]]\n ###\n threshmask=labels==closest_label \n \n if use_centroid==False:\n reference_pixel = reference_pixel\n if use_centroid==True:\n reference_pixel = centroid_quadratic( np.nanmedian(hdu[1].data['FLUX']), threshold_mask,reference_pixel) \n \n return threshmask#, reference_pixel\n\n\n\n# Lightkurve method of finding centroids of brightest pixel (it's pretty good!)\ndef centroid_quadratic(data, mask, reference_pixel):#mask=None):\n \"\"\"Computes the quadratic estimate of the centroid in a 2d-array.\n This method will fit a simple 2D second-order polynomial\n $P(x, y) = a + bx + cy + dx^2 + exy + fy^2$\n to the 3x3 patch of pixels centered on the brightest pixel within\n the image. This function approximates the core of the Point\n Spread Function (PSF) using a bivariate quadratic function, and returns\n the maximum (x, y) coordinate of the function using linear algebra.\n For the motivation and the details around this technique, please refer\n to Vakili, M., & Hogg, D. W. 2016, ArXiv, 1610.05873.\n Caveat: if the brightest pixel falls on the edge of the data array, the fit\n will tend to fail or be inaccurate. \n As used in the Lightkurve package of Barentsen et al.\n Parameters\n ----------\n data : 2D array\n The 2D input array representing the pixel values of the image.\n mask : array_like (bool), optional\n A boolean mask, with the same shape as `data`, where a **True** value\n indicates the corresponding element of data is masked.\n Returns\n -------\n column, row : tuple\n The coordinates of the centroid in column and row. If the fit failed,\n then (NaN, NaN) will be returned.\n \"\"\"\n # Step 1: identify the patch of 3x3 pixels (z_)\n # that is centered on the brightest pixel (xx, yy)\n if mask is not None:\n data = data * mask\n arg_data_max = np.nanargmax(data)\n yy, xx = np.unravel_index(arg_data_max, data.shape)\n \n # Make sure the 3x3 patch does not leave the image bounds\n if yy < 1:\n yy = 1\n if xx < 1:\n xx = 1\n if yy > (data.shape[0] - 2):\n yy = data.shape[0] - 2\n if xx > (data.shape[1] - 2):\n xx = data.shape[1] - 2\n\n z_ = data[yy-1:yy+2, xx-1:xx+2]\n\n # Next, we will fit the coefficients of the bivariate quadratic with the\n # help of a design matrix (A) as defined by Eqn 20 in Vakili & Hogg\n # (arxiv:1610.05873). 
The design matrix contains a\n # column of ones followed by pixel coordinates: x, y, x**2, xy, y**2.\n \n A = np.array([[1, -1, -1, 1, 1, 1],\n [1, 0, -1, 0, 0, 1],\n [1, 1, -1, 1, -1, 1],\n [1, -1, 0, 1, 0, 0],\n [1, 0, 0, 0, 1, 0],\n [1, 1, 0, 1, 0, 0],\n [1, -1, 1, 1, -1, 1],\n [1, 0, 1, 0, 0, 1],\n [1, 1, 1, 1, 1, 1]])\n \n # We also pre-compute $(A^t A)^-1 A^t$, cf. Eqn 21 in Vakili & Hogg.\n At = A.transpose()\n \n # In Python 3 this can become `Aprime = np.linalg.inv(At @ A) @ At`\n Aprime = np.matmul(np.linalg.inv(np.matmul(At, A)), At)\n\n # Step 2: fit the polynomial $P = a + bx + cy + dx^2 + exy + fy^2$\n # following Equation 21 in Vakili & Hogg.\n # In Python 3 this can become `Aprime @ z_.flatten()`\n a, b, c, d, e, f = np.matmul(Aprime, z_.flatten()) #dot product\n\n # Step 3: analytically find the function maximum,\n # following https://en.wikipedia.org/wiki/Quadratic_function\n det = 4 * d * f - e ** 2\n if abs(det) < 1e-6:\n return np.nan, np.nan # No solution\n xm = - (2 * f * b - c * e) / det\n ym = - (2 * d * c - b * e) / det\n \n return np.array(xx + xm), np.array(yy + ym) \n\n#functions to calculate CDPP in a given window of time\ndef running_median(data, window_size):\n from collections import deque\n from bisect import insort, bisect_left\n from itertools import islice\n seq = iter(data)\n d = deque()\n s = []\n result = []\n for item in islice(seq, window_size):\n d.append(item)\n insort(s, item)\n result.append(s[len(d)//2])\n m = window_size // 2\n for item in seq:\n old = d.popleft()\n d.append(item)\n del s[bisect_left(s, old)]\n insort(s, item)\n result.append(s[m])\n return result\n\n#functions to calculate CDPP in a given window of time\ndef running_mean(data,window_size):\n import numpy as np\n if window_size > len(data):\n window_size = len(data)\n cumsum = np.cumsum(np.insert(data, 0, 0))\n return (cumsum[window_size:] - cumsum[:-window_size]) / float(window_size)\n\n#functions to calculate CDPP in a given window of time\ndef CDPP(time,flux,error,method,unit,binsize=(1.0/24.0)): #1hr bin by default\n import numpy as np\n #Step 1: Calc number of data points per time bin\n cad = np.nanmedian(np.diff(time))\n Npts_per_timebin = int(binsize/cad)\n \n #Step 2: Estimate running median or mean of binned flux\n if method=='median':\n binmed = running_median(flux,Npts_per_timebin)\n if method=='mean':\n binmed = running_mean(flux,Npts_per_timebin)\n \n #Step 3: Calculate the Combined Differential Photometric Precision (CDPP)\n if unit=='ppo':\n CDPP = np.nanstd(binmed) #in ppo per sqrt time bin\n if unit=='pph':\n CDPP = 1e2*np.nanstd(binmed) #in pph per sqrt time bin \n if unit=='ppt': \n CDPP = 1e3*np.nanstd(binmed) #in ppt per sqrt time bin\n if unit=='ppm':\n CDPP = 1e6*np.nanstd(binmed) #in ppm per sqrt time bin\n return CDPP\n\n\ndef estimate_min_scatter(hdu,flux,reference_pixel,minNstd,maxNstd): #not sure if this is best, may be too strict...\n from scipy.signal import savgol_filter\n masks, scatters = [], []\n for i in range(minNstd, maxNstd):\n temp_pix_mask = thresholdmask(hdu,reference_pixel,i)\n tempflux = np.sum(flux[:,temp_pix_mask],axis=-1)\n #Savitzky-Golay window must be odd and no longer than the light curve\n window = min(1001, len(tempflux) - (1 - len(tempflux) % 2))\n smooth = savgol_filter(tempflux, window, polyorder=5)\n masks.append(temp_pix_mask)\n scatters.append(np.nanstd(tempflux / smooth))\n\n # Choose the aperture that minimizes the scatter\n pix_mask = masks[np.argmin(scatters)]\n return pix_mask\n\n#### Step 2\n\n#helper functions to save apertures, image fluxes\ndef saveNDarr(multiNDarr,path,filename):\n import pickle\n output = open(path+filename+'.pkl', 'wb')\n pickle.dump(multiNDarr, output)\n output.close()
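# A quick sanity check (illustration only, not part of the original pipeline):
# exercising centroid_quadratic above on a synthetic Gaussian PSF. The recovered
# centroid should land close to the true sub-pixel center; the sigma and grid
# size are arbitrary choices for the demo.
import numpy as np

yy, xx = np.mgrid[0:11, 0:11]
true_x, true_y = 5.3, 4.7                  # sub-pixel ground truth
psf = np.exp(-((xx - true_x)**2 + (yy - true_y)**2) / (2 * 1.5**2))
cx, cy = centroid_quadratic(psf, np.ones_like(psf, dtype=bool), [5, 5])
print(cx, cy)                              # approximately 5.3, 4.7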
\n\ndef readNDarr(path,filename):\n import pickle\n pkl_file = open(path+filename+'.pkl', 'rb')\n data = pickle.load(pkl_file)\n pkl_file.close() \n return data\n\n\ndef query_region_for_nearby_stars(ID,radial_cone_in_arcsecs):\n #stuff for getting FFI data from MAST\n from astroquery.mast import Catalogs\n import numpy as np\n import time as clock\n import requests\n \n starName=\"TIC \"+str(ID) \n radial_cone = radial_cone_in_arcsecs/ 3600.0 # angular radius in degrees\n try:\n catalogData = Catalogs.query_object(starName, radius = radial_cone, catalog = \"TIC\")\n except requests.exceptions.ConnectionError as E:\n clock.sleep(5) #pause 5 seconds then try again\n catalogData = Catalogs.query_object(starName, radius = radial_cone, catalog = \"TIC\") \n # \n return catalogData\n\ndef convert_TessMag_to_Flux(Tmag):\n f = 10**(-(Tmag/2.5))\n return f\n\ndef calc_flux_contamination(ID,radial_cone_in_arcseconds=63):\n \n catalogData = query_region_for_nearby_stars(ID,radial_cone_in_arcseconds)\n \n Tmag_target_star = catalogData[0]['Tmag']\n Flux_target_star = convert_TessMag_to_Flux(Tmag_target_star)\n Flux_all = []\n for t in range(len(catalogData)):\n Flux_all = np.append(Flux_all, convert_TessMag_to_Flux(catalogData[t]['Tmag']))\n Flux_total = np.sum(Flux_all)\n \n flux_contamination_ratio = 1 - Flux_target_star / Flux_total #fraction of light NOT from the target star (equals 0 when the target is the only source in the cone)\n \n return flux_contamination_ratio\n \n \n\ndef SAP(ID,Sector,cutoutsize,hdu,quality_mask,threshold,cadence,reference_pixel,verbose,savelcpath,use_SPOC_aperture='no'):\n import numpy as np\n quality_mask = hdu[1].data['QUALITY']!=0\n median_image = np.nanmedian(hdu[1].data['FLUX'][~quality_mask], axis=0)\n ###\n ###\n flux = hdu[1].data['FLUX'][~quality_mask] \n rawtime = hdu[1].data['TIME'][~quality_mask]\n #include only finite values, excluding NaNs\n m = np.any(np.isfinite(flux),axis=(1,2)) \n rawtime = np.ascontiguousarray(rawtime[m],dtype=np.float64)\n flux = np.ascontiguousarray(flux[m],dtype=np.float64) \n ###\n ###\n # find dimmest pixels using inverted threshold mask\n bkg_mask = ~thresholdmask(hdu,reference_pixel=None,threshold=1/1000)\n ###\n # select brightest pixels using threshold mask\n try:\n T=threshold\n threshmask=thresholdmask(hdu,reference_pixel,T)\n except ValueError as e:\n try:\n T=threshold/2.0\n print('threshold too high, no pixels selected. Trying half of input threshold: ',str(T))\n threshmask=thresholdmask(hdu,reference_pixel,T)\n except ValueError as ee:\n try:\n T=threshold/3.0\n print('threshold STILL too high, no pixels selected. 
Trying third of input threshold: ',str(T))\n threshmask=thresholdmask(hdu,reference_pixel,T)\n except ValueError as eee:\n print('setting threshold=1 (last resort)')\n T=1.0\n try:\n threshmask=thresholdmask(hdu,reference_pixel,T)\n except ValueError as eeee:\n print('Ok, I tried...')\n pass\n try:\n pix_mask=threshmask\n print('selected threshold: ',T)\n except UnboundLocalError as UE:\n print('unable to find suitable threshold mask')\n return\n \n if cadence=='short':\n if use_SPOC_aperture=='yes':\n print('using SPOC aperture')\n try:\n pipeline_mask = hdu[2].data & 2 > 0\n except TypeError: # Early versions of TESScut returned floats in HDU 2\n pipeline_mask = np.ones(hdu[2].data.shape, dtype=bool)\n pix_mask = pipeline_mask\n bkg_mask = ~pipeline_mask\n if use_SPOC_aperture=='no':\n print('using threshold mask')\n ###\n ###\n #subtract background flux\n# flux -= np.median(flux[:, bkg_mask], axis=-1)[:, None, None]\n# fluxsum = np.nansum(flux[:, pix_mask], axis=-1)\n# is_allnan = ~np.any(np.isfinite(flux[:, pix_mask]), axis=1) #removing nans\n# fluxsum[is_allnan] = np.nan\n# sap_flux=fluxsum/np.nanmedian(fluxsum)\n \n \n rawflux = np.nansum(flux[:, pix_mask], axis=-1)\n bkgFlux = np.nansum(flux[:, bkg_mask], axis=-1)\n \n Npixbkg = len(np.where(bkg_mask == True)[0])\n Npixaper= len(np.where(pix_mask == True)[0])\n \n bkgFlux = bkgFlux/Npixbkg #normalize background\n \n rawsap_flux = rawflux - (bkgFlux * Npixaper)\n sap_flux = rawsap_flux / np.nanmedian(rawsap_flux)\n \n # Deblending by calculating flux contaminatio ratio for stars within 3 TESS pixels of target\n # If no othery stars are nearby, this ratio = 1\n flux_contamination_ratio = calc_flux_contamination(ID)\n # subtract and re-normalize sap_flux to complete the process\n sap_flux = sap_flux - flux_contamination_ratio\n sap_flux = sap_flux / np.nanmedian(sap_flux)\n\n \n nanmask = np.where(np.isfinite(sap_flux)==True)[0]\n# e0=hdu[1].data['FLUX_ERR'][~quality_mask]\n# error = np.nansum(e0[:, pix_mask]**2, axis=1)**0.5\n# is_allnan = ~np.any(np.isfinite(e0[:, pix_mask]), axis=1)\n# error[is_allnan] = np.nan\n\n # these provide LARGE errors for either or both TPFs/FFIs\n\n error = np.abs( sap_flux / np.nanmedian(np.nansum(sap_flux)/np.nanmedian(sap_flux)))\n error =error[nanmask]\n \n if verbose==True:\n print('len check ',' T', len(rawtime),' SAP',len(sap_flux), ' E ', len(error))\n print('shape check ',' T', np.shape(rawtime),' SAP',np.shape(sap_flux), ' E ', np.shape(error))\n ###\n ###\n ###\n SAP_LC = pd.DataFrame({\"Time\":rawtime,\"RAW SAP Flux\":rawsap_flux,\"SAP Flux\":sap_flux,\"SAP Error\":error, \"Background Flux\":bkgFlux})\n ###\n ### saving data before momentumdump removal at a later step\n ###\n if verbose==True: \n print('RAW len check:', len(rawtime),len(sap_flux),len(error))\n SAP_LC.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_RAW_LC.txt\",index=False)\n \n return bkg_mask, pix_mask ,flux, median_image, SAP_LC, flux_contamination_ratio\n \ndef get_coordinates(hdu, cadence='all'):\n \"\"\"Returns two 3D arrays of RA and Dec values in decimal degrees.\n If cadence number is given, returns 2D arrays for that cadence. 
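# A worked example (illustration only, not part of the original pipeline): the
# deblending arithmetic used by calc_flux_contamination above. Magnitudes become
# relative fluxes via f = 10**(-Tmag/2.5), and the contamination ratio is the
# fraction of aperture light NOT from the target. The magnitudes are invented.
target_Tmag = 10.0
neighbor_Tmags = [12.0, 13.5]
f_target = 10 ** (-target_Tmag / 2.5)
f_total = f_target + sum(10 ** (-m / 2.5) for m in neighbor_Tmags)
contamination = 1 - f_target / f_total
print(round(contamination, 3))   # ~0.165: roughly 17% of the light is from neighbors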
If\n cadence is 'all' returns one RA, Dec value for each pixel in every cadence.\n Uses the WCS solution and the POS_CORR data from TPF header.\n Parameters\n ----------\n cadence : 'all' or int\n Which cadences to return the RA Dec coordinates for.\n Returns\n -------\n ra : numpy array, same shape as tpf.flux[cadence]\n Array containing RA values for every pixel, for every cadence.\n dec : numpy array, same shape as tpf.flux[cadence]\n Array containing Dec values for every pixel, for every cadence.\n \"\"\"\n from astropy.wcs import WCS\n import warnings\n wcs = WCS(hdu[2].header)\n X, Y = np.meshgrid(np.arange(hdu[1].data['FLUX'].shape[2]), np.arange(hdu[1].data['FLUX'].shape[1]))\n pos_corr1_pix = np.copy(hdu[1].data['POS_CORR1'])\n pos_corr2_pix = np.copy(hdu[1].data['POS_CORR2'])\n\n # We zero POS_CORR* when the values are NaN or make no sense (>50px)\n with warnings.catch_warnings(): # Comparing NaNs to numbers is OK here\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n bad = np.any([~np.isfinite(pos_corr1_pix),\n ~np.isfinite(pos_corr2_pix),\n np.abs(pos_corr1_pix - np.nanmedian(pos_corr1_pix)) > 50,\n np.abs(pos_corr2_pix - np.nanmedian(pos_corr2_pix)) > 50], axis=0)\n pos_corr1_pix[bad], pos_corr2_pix[bad] = 0, 0\n\n # Add in POSCORRs\n X = (np.atleast_3d(X).transpose([2, 0, 1]) +\n np.atleast_3d(pos_corr1_pix).transpose([1, 2, 0]))\n Y = (np.atleast_3d(Y).transpose([2, 0, 1]) +\n np.atleast_3d(pos_corr2_pix).transpose([1, 2, 0]))\n\n # Pass through WCS\n quality_mask = hdu[1].data['QUALITY']!=0\n# ra, dec = wcs.wcs_pix2world(X.ravel(), Y.ravel(), 1)\n ra, dec = wcs.wcs_pix2world(X.ravel(), Y.ravel(), 0)\n ra = ra.reshape((pos_corr1_pix.shape[0], hdu[1].data['FLUX'].shape[1], hdu[1].data['FLUX'].shape[2]))\n dec = dec.reshape((pos_corr2_pix.shape[0], hdu[1].data['FLUX'].shape[1], hdu[1].data['FLUX'].shape[2]))\n ra, dec = ra[quality_mask], dec[quality_mask]\n if cadence != 'all':\n return ra[cadence], dec[cadence]\n return ra, dec\n\ndef plot_orientation(hdu, ax):\n \n nx, ny = hdu[1].data['FLUX'].shape[1:]\n col,row = hdu[1].header['1CRPX5'], hdu[1].header['2CRPX5']\n x0, y0 = 1,ny-2.5# + int(0.5 * nx), 0 + int(0.25 * ny)\n #print(x0,y0)\n # East\n tmp = get_coordinates(hdu, cadence='all')\n ra00, dec00 = tmp[0][0][0][0], tmp[1][0][0][0]\n ra10, dec10 = tmp[0][0][0][-1], tmp[1][0][0][-1]\n theta = np.arctan((dec10 - dec00) / (ra10 - ra00))\n if (ra10 - ra00) < 0.0:\n theta += np.pi\n # theta = -22.*np.pi/180.\n x1, y1 = 1.0 * np.cos(theta), 1.0 * np.sin(theta)\n ax.arrow(x0, y0, x1, y1, head_width=0.2, color=\"black\")\n ax.text(x0 + 1.5 * x1, y0 + 1.5 * y1, \"E\", color=\"black\")\n # North\n theta = theta + 90.0 * np.pi / 180.0\n x1, y1 = 1.0 * np.cos(theta), 1.0 * np.sin(theta)\n ax.arrow(x0, y0, x1, y1, head_width=0.2, color=\"black\")\n ax.text(x0 + 1.5 * x1, y0 + 1.5 * y1, \"N\", color=\"black\")\n\n \ndef get_TESS_sources(ID,hdu,downloadpath,magnitude_limit):\n #stuff for getting data from MAST\n import astropy\n from astroquery.mast import Catalogs\n from astroquery.mast import Tesscut\n from astropy.coordinates import SkyCoord\n from astroquery.gaia import Gaia\n from astropy.wcs import WCS\n from astropy.io import fits\n import astropy.units as u\n from astropy.coordinates import SkyCoord, Angle\n from astroquery.vizier import Vizier\n import numpy as np\n import pandas as pd\n # changing cache directories\n Tesscut.cache_location=downloadpath\n Catalogs.cache_location=downloadpath\n Vizier.cache_location=downloadpath\n ###\n starName=\"TIC \"+str(ID)\n 
pix_scale=21\n cone = 0.5*np.nanmax(hdu[1].data['FLUX'].shape[1:]) * pix_scale\n cone = 5*pix_scale\n# result = Catalogs.query_object(starName, catalog = \"TIC\",radius=Angle(cone, \"arcsec\"))\n ###\n ra=hdu[2].header['RA_OBJ']\n dec=hdu[2].header['DEC_OBJ'] \n # Get the positions of the Gaia sources\n frame='icrs'\n c1 = SkyCoord(ra,dec, frame=frame, unit='deg')\n result = Catalogs.query_region(c1, catalog = \"TIC\",radius=Angle(cone, \"arcsec\"))\n \n no_targets_found_message = ValueError('Either no sources were found in the query region '\n 'or Vizier is unavailable')\n too_few_found_message = ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit))\n \n print('')\n# print(result.columns)\n# result=result.filled()\n result=result.to_pandas()\n result = result[result['Tmag'] < magnitude_limit]\n ###\n if len(result) == 0:\n raise no_targets_found_message\n \n return result\n\ndef get_GAIA_sources(ID,hdu, downloadpath,magnitude_limit):\n #stuff for getting data from MAST\n import astropy\n from astroquery.mast import Catalogs\n from astroquery.mast import Tesscut\n from astropy.coordinates import SkyCoord\n from astroquery.gaia import Gaia\n from astropy.wcs import WCS\n from astropy.io import fits\n import astropy.units as u\n from astropy.coordinates import SkyCoord, Angle\n from astroquery.vizier import Vizier\n import numpy as np\n import pandas as pd\n ###\n # changing cache directories\n Tesscut.cache_location=downloadpath\n Catalogs.cache_location=downloadpath\n Vizier.cache_location=downloadpath\n ###\n starName=\"TIC \"+str(ID)\n pix_scale=21\n \n ra=hdu[2].header['RA_OBJ']\n dec=hdu[2].header['DEC_OBJ'] \n # Get the positions of the Gaia sources\n c1 = SkyCoord(ra,dec, frame='icrs', unit='deg')\n # Use pixel scale for query size\n pix_scale = 21.0\n# Vizier.ROW_LIMIT = -1 # doesn't include target star (?)\n cone = 0.5*np.nanmax(hdu[1].data['FLUX'].shape[1:]) * pix_scale\n cone = 5*pix_scale\n try:\n result = Vizier.query_region(c1, catalog=[\"I/345/gaia2\"],radius=Angle(cone, \"arcsec\"))\n# result = Vizier.query_region(c1, catalog=[\"GAIA\"],radius=Angle(cone, \"arcsec\"))\n except requests.exceptions.ConnectionError as CE:\n print(CE)\n import time as clock\n clock.sleep(10)\n print('trying Vizier query again')\n result = Vizier.query_region(c1, catalog=[\"I/345/gaia2\"],radius=Angle(cone, \"arcsec\"))\n print('')\n #\n no_targets_found_message = ValueError('Either no sources were found in the query region '\n 'or Vizier is unavailable')\n too_few_found_message = ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit))\n \n if result is None:\n raise no_targets_found_message\n elif len(result) == 0:\n raise too_few_found_message\n result = result[\"I/345/gaia2\"].to_pandas() #using GAIA DR2 \n result = result[result.Gmag < magnitude_limit]\n \n #rename RA DEC columns to match TESS query\n result=result.rename(columns={'RA_ICRS': 'ra','DE_ICRS': 'dec', 'pmRA':'pmRA', 'pmDE':'pmDEC'})\n \n return result\n\n\ndef Get_stellar_params(ID,downloadpath):\n from transitleastsquares import catalog_info\n #stuff for getting FFI data from MAST\n import astropy\n from astroquery.mast import Catalogs\n from astroquery.mast import Tesscut\n from astropy.coordinates import SkyCoord\n from astroquery.gaia import Gaia\n from astropy.wcs import WCS\n from astropy.io import fits\n import astropy.units as u\n import numpy as np\n import time as clock\n import requests\n # changing cache directories\n Tesscut.cache_location=downloadpath\n 
Catalogs.cache_location=downloadpath\n \n try:\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n\n starName=\"TIC \"+str(ID) \n radSearch = 21/3600.0 # angular radius in degrees\n catalogData = Catalogs.query_object(starName, radius = radSearch, catalog = \"TIC\")\n except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:\n clock.sleep(5) #pause 5 seconds then try again\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n\n starName=\"TIC \"+str(ID) \n radSearch = 21/3600.0 # angular radius in degrees\n catalogData = Catalogs.query_object(starName, radius = radSearch, catalog = \"TIC\")\n ###\n ra = catalogData[0]['ra']\n dec = catalogData[0]['dec']\n coord = SkyCoord(ra, dec, unit = \"deg\")\n Tmag=catalogData[0]['Tmag']\n Gmag=catalogData[0]['GAIAmag']\n Vmag=catalogData[0]['Vmag']\n rmag=catalogData[0]['rmag']\n imag=catalogData[0]['imag']\n zmag=catalogData[0]['zmag'] \n Jmag=catalogData[0]['Jmag']\n Hmag=catalogData[0]['Hmag']\n Kmag=catalogData[0]['Kmag']\n Teff=catalogData[0]['Teff'] \n logg=catalogData[0]['logg']\n rho=catalogData[0]['rho']\n dist=catalogData[0]['d']\n ###\n ###\n if str(Vmag)==str('--'): Vmag=np.nan\n if str(Tmag)==str('--'): Tmag=np.nan\n if str(Gmag)==str('--'): Gmag=np.nan\n if str(rmag)==str('--'): rmag=np.nan\n if str(imag)==str('--'): imag=np.nan\n if str(zmag)==str('--'): zmag=np.nan\n if str(Jmag)==str('--'): Jmag=np.nan \n if str(Hmag)==str('--'): Hmag=np.nan\n if str(Kmag)==str('--'): Kmag=np.nan\n if str(logg)==str('--'): logg=np.nan\n if str(rho)==str('--'): rho=np.nan\n if str(dist)==str('--'): dist=np.nan\n ###\n return Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist\n\ndef plot_catalog_sources(ID,hdu,ax, downloadpath,magnitude_limit,catalog,dot_scale=35,dolegend=\"no\"):\n #stuff for getting data from MAST\n import astropy\n from astroquery.mast import Catalogs\n from astroquery.mast import Tesscut\n from astropy.coordinates import SkyCoord\n from astroquery.gaia import Gaia\n from astropy.wcs import WCS\n from astropy.io import fits\n import astropy.units as u\n from astropy.coordinates import SkyCoord, Angle\n from astroquery.vizier import Vizier\n import numpy as np\n import pandas as pd\n \n # changing cache directories\n Tesscut.cache_location=downloadpath\n Catalogs.cache_location=downloadpath\n Vizier.cache_location=downloadpath\n \n if catalog=='TESS':\n result = get_TESS_sources(ID,hdu, downloadpath, magnitude_limit)\n if catalog=='GAIA':\n result = get_GAIA_sources(ID,hdu, downloadpath,magnitude_limit)\n \n no_targets_found_message = ValueError('Either no sources were found in the query region '\n 'or Vizier is unavailable')\n too_few_found_message = ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit))\n \n if result is None:\n raise no_targets_found_message\n pass\n elif len(result) == 0:\n raise too_few_found_message\n pass\n else:\n radecs = np.vstack([result['ra'], result['dec']]).T\n wcs = WCS(hdu[2].header)\n coords = wcs.all_world2pix(radecs, 0) \n #coords = wcs.wcs_world2pix(radecs, 1) #test if this is better?\n ###\n year = ((np.nanmin(hdu[1].data['TIME'])+2457000 ) * u.day).to(u.year)\n ###\n pmra = ((np.nan_to_num(np.asarray(result['pmRA'])) * u.milliarcsecond/u.year) * year).to(u.arcsec).value\n pmdec = ((np.nan_to_num(np.asarray(result['pmDEC'])) * u.milliarcsecond/u.year) * year).to(u.arcsec).value\n result['ra'] += pmra\n result['dec'] += pmdec \n ###\n if 
catalog=='TESS':\n df = pd.DataFrame(data=dict(x=coords[:, 0], y=coords[:, 1], mag=result['Tmag']))\n if catalog=='GAIA':\n df = pd.DataFrame(data=dict(x=coords[:, 0], y=coords[:, 1], mag=result['Gmag']))\n Nbins= 22#(int(magnitude_limit)+1)\n #print('Nbins',Nbins)\n bins = np.linspace(df.mag.min(), df.mag.max(), Nbins)\n grouped = df.groupby(np.digitize(df.mag, bins))\n ###\n ###\n #print('sizes',sizes)\n labels=[]\n for i in range(Nbins):\n labels=np.append(labels,str(i))\n cm = plt.get_cmap('gist_rainbow')\n ax.set_prop_cycle(color=[cm(1.*i/Nbins) for i in range(Nbins)])\n\n df=df.sort_values(by=['mag']).reset_index()\n del df['index']\n\n scale=dot_scale\n sizes=[scale*(len(df)+1)] \n ### Get stellar params for target star\n Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist = Get_stellar_params(ID,downloadpath) \n reference_pixel=[hdu[1].header['1CRPX4']-1,hdu[1].header['2CRPX4']-1]\n #print('targ gmag',Gmag)\n sizes = 128 / 2**((np.array(df['mag'])-Gmag))\n # for i in range(len(df)):\n # sizes = np.append(sizes,scale*(i+1))\n\n cad=(np.round(np.nanmedian(np.diff(hdu[1].data['TIME']))*60*24))\n if cad==30:\n offset=1\n if cad==2:\n offset=1.5\n offset=0\n\n #try to keep labels inside bounds of cutout image\n xlimit = np.shape(hdu[1].data['FLUX'][0])[0]-1\n ylimit = np.shape(hdu[1].data['FLUX'][0])[1]-1\n\n for x in reversed(range(len(df))): #s=base_ms / 2 ** dmag,\n if (df['x'][x]-offset > xlimit) or (df['y'][x]-offset > ylimit):\n pass\n else:\n ax.scatter(df['x'][x]-offset,df['y'][x]-offset,s=sizes[x],color='cyan')\n #print(sizes[x])\n ax.text(df['x'][x]-offset,df['y'][x]-offset, str(int(df['mag'][x])), color=\"red\", zorder=100,fontsize=16)\n ###\n ax.text(reference_pixel[0],reference_pixel[1], str(int(Gmag)), color=\"black\", zorder=100,fontsize=16)\n ###\n ###\n if dolegend==\"yes\": \n handles, labels = ax.get_legend_handles_labels()\n by_label = dict(zip(labels, handles))\n \n if catalog=='TESS':\n if len(by_label.values())>4:\n ncols=2\n else:\n ncols=1\n ax.legend(by_label.values(), by_label.keys(),bbox_to_anchor=(1.4,1.1),\\\n loc='upper right',fontsize=8,ncol=ncols,markerscale=1,\\\n labelspacing=2.5,title=\"TESS Mag\") \n if catalog=='GAIA': \n if len(by_label.values())>4:\n ncols=2\n else:\n ncols=1 \n ax.legend(by_label.values(), by_label.keys(),bbox_to_anchor=(1.4,1.1),\\\n loc='upper right',fontsize=8,ncol=ncols,markerscale=1,\\\n labelspacing=1.,title=\"GAIA Mag\") \n ### \n #plot orientation of cutout (N,E)\n plot_orientation(hdu,ax)\n ###\n return bins, sizes,labels,grouped,result,df\n\ndef plot_cutouts(ID,Sector,cadence,hdu,pix_mask,bkg_mask,reference_pixel,fig,axes,savelcpath,downloadpath,do_colorbar='yes'):\n import astropy\n from astroquery.mast import Catalogs\n from astroquery.mast import Tesscut\n from astropy.coordinates import SkyCoord\n from astroquery.gaia import Gaia\n from astropy.wcs import WCS\n from astropy.io import fits\n import astropy.units as u\n from astropy.coordinates import SkyCoord, Angle\n from astroquery.vizier import Vizier\n import numpy as np\n import pandas as pd\n# from NEMESIS_pipeline import centroid_quadratic,Get_stellar_params \n \n # changing cache directories\n Tesscut.cache_location=downloadpath\n Catalogs.cache_location=downloadpath\n Vizier.cache_location=downloadpath\n \n\n if len(axes)==1:\n ax1=axes[0]\n if len(axes)==2:\n ax1=axes[0]\n ax2=axes[1]\n \n wcs = WCS(hdu[2].header)\n median_image = np.nanmedian(hdu[1].data['FLUX'],axis=0)\n cutoutsize=np.shape(hdu[1].data['FLUX'][0])[0]\n centx,centy = 
centroid_quadratic(median_image, pix_mask,reference_pixel)\n cxs,cys = check_centroids(ID,Sector,cutoutsize,cadence,reference_pixel,savelcpath)\n \n fs=12\n ms=20\n if cadence=='long':\n ax1.set_title('FFI cutout')\n if cadence=='short':\n ax1.set_title('TPF cutout') \n medi_img1 = ax1.imshow(median_image, cmap=\"gray_r\",alpha=0.9, aspect=\"auto\") \n medi_img1.set_clim(vmin=0, vmax=np.nanmax(median_image))\n ax1.plot(reference_pixel[0],reference_pixel[1],'bX',markersize=ms)\n ax1.plot(centx,centy,'yX',markersize=ms)\n ax1.plot(cxs,cys,color='purple',marker='.',markersize=3,linestyle='none') \n if do_colorbar==\"yes\": \n cbar = fig.colorbar(medi_img1,ax=ax1,pad=0.01)\n cbar.ax.set_ylabel('Median Photon Counts', fontsize = fs,rotation=270)\n cbar.ax.get_yaxis().labelpad = fs\n ax1.invert_yaxis()\n ax1.set_xlim(-0.5,np.shape(hdu[1].data['FLUX'][0])[0]-1)\n ax1.set_ylim(-0.5,np.shape(hdu[1].data['FLUX'][0])[1]-1)\n ax1.coords[0].set_axislabel('RA')\n ax1.coords[1].set_axislabel('DEC',minpad=-2) #minpad =labelpad\n ###\n# ax1.coords[0].set_major_formatter('hh:mm:ss')\n# ax1.coords[1].set_major_formatter('hh:mm:ss')\n ax1.coords[0].set_major_formatter('d.dddd')\n ax1.coords[1].set_major_formatter('d.dddd') \n ###\n Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist = Get_stellar_params(ID,downloadpath)\n ###\n cm = plt.get_cmap('gist_rainbow') \n #also plot RA,DEC from pixel coordinates (TAN), more digits than (_OBJ)\n ra2=hdu[2].header['CRVAL1'] \n dec2=hdu[2].header['CRVAL2']\n \n px,py = wcs.all_world2pix([[ra2,dec2]],0)[0]\n# px,py = wcs.all_world2pix([[ra2,dec2]],1)[0]\n if len(axes)==1:\n try:\n bins, sizes,labels,grouped,result,df = plot_catalog_sources(ID,hdu,ax1,downloadpath,magnitude_limit=18,catalog='GAIA',dot_scale=10,dolegend=\"no\")\n except ValueError as e:\n print(e)\n ax1.imshow(pix_mask, cmap=\"Reds\", alpha=0.9, aspect=\"auto\") #selected aperture\n ax1.imshow(bkg_mask, cmap=\"Purples\", alpha=0.25, aspect=\"auto\")\n \n if len(axes)==2: \n try:\n bins, sizes,labels,grouped,result,df = plot_catalog_sources(ID,hdu,ax1,downloadpath,magnitude_limit=18,catalog='GAIA',dot_scale=10,dolegend=\"no\")\n except ValueError as e:\n print(e)\n ax2.set_title('Aperture and Background Masks')\n medi_img2 = ax2.imshow(median_image, cmap=\"gray_r\",alpha=0.9, aspect=\"auto\") \n medi_img2.set_clim(vmin=0, vmax=np.nanmax(median_image))\n ax2.imshow(pix_mask, cmap=\"Reds\", alpha=0.9, aspect=\"auto\") #selected aperture\n ax2.imshow(bkg_mask, cmap=\"Purples\", alpha=0.25, aspect=\"auto\")\n ax2.plot(reference_pixel[0],reference_pixel[1],'rX',markersize=ms)\n ax2.plot(centx,centy,'yX',markersize=ms)\n ax2.plot(cxs,cys,color='purple',marker='.',markersize=3,linestyle='none')\n if do_colorbar==\"yes\": \n cbar = fig.colorbar(medi_img2,ax=ax2,pad=0.01)\n cbar.ax.set_ylabel('Median Photon Counts', fontsize = fs,rotation=270)\n cbar.ax.get_yaxis().labelpad = fs\n ax2.invert_yaxis()\n ax2.set_xlim(-0.5,np.shape(hdu[1].data['FLUX'][0])[0]-1)\n ax2.set_ylim(-0.5,np.shape(hdu[1].data['FLUX'][0])[1]-1)\n ax2.coords[0].set_axislabel('RA')\n ax2.coords[1].set_axislabel('DEC',minpad=-2) #minpad =labelpad\n# ax2.coords[0].set_major_formatter('hh:mm:ss')\n# ax2.coords[1].set_major_formatter('hh:mm:ss')\n ax2.coords[0].set_major_formatter('d.dddd')\n ax2.coords[1].set_major_formatter('d.dddd')\n \n \n\n####Step 4 (Two funcs b/c they're long and a 3rd to apply one)\ndef momentumdump_check(SectorNum):\n SectorNum=float(SectorNum)\n if SectorNum==1 or SectorNum==2 or SectorNum==3 or 
SectorNum==4:\n mdumps=2.5\n if SectorNum==1:\n #orbit start times\n t_0 = 1325.29278\n t_1 = 1338.52153\n if SectorNum==2:\n #orbit start times\n t_0 = 1354.10102\n t_1 = 1368.59406\n if SectorNum==3:\n #orbit start times\n t_0 = 1382.03987+mdumps #since it's cut out in mdump removal below\n t_1 = 1396.60497 \n if SectorNum==4:\n #orbit start times\n t_0 = 1410.89974\n t_1 = 1424.54897\n ### \n ### \n elif SectorNum==5:\n mdumps=3.0\n #orbit start times\n t_0 = 1437.82566\n t_1 = 1451.54898 \n ### \n elif SectorNum==6 or SectorNum==7 or SectorNum==8 or SectorNum==9 or SectorNum==10 or SectorNum==11 or SectorNum==12:\n mdumps=3.125\n if SectorNum==6:\n #orbit start times\n t_0 = 1465.21262\n t_1 = 1478.11304\n if SectorNum==7:\n #orbit start times\n t_0 = 1491.62553\n t_1 = 1504.69775\n if SectorNum==8:\n #orbit start times\n t_0 = 1517.34150\n t_1 = 1530.25816\n if SectorNum==9:\n #orbit start times\n t_0 = 1543.21648\n t_1 = 1557.00080\n if SectorNum==10:\n #orbit start times\n t_0 = 1569.43176\n t_1 = 1582.76231\n if SectorNum==11:\n #orbit start times\n t_0 = 1596.77203\n t_1 = 1610.77620\n if SectorNum==12:\n #orbit start times\n t_0 = 1624.94979\n t_1 = 1640.03312\n elif SectorNum==13: \n mdumps=3.375\n #orbit start times\n t_0 = 1653.91505\n t_1 = 1668.61921\n ### \n elif SectorNum==14:\n mdumps=4.4\n #orbit start times\n t_0 = 1683.34838\n t_1 = 1697.33865 \n ### \n elif SectorNum==31:\n mdumps=6 # 6-ish, half way thru each orbit. We won't use this number,just need an output\n #orbit start times\n t_0 = 2144.50927\n t_1 = 2157.45371\n ### \n return mdumps,t_0,t_1\n\n\n# this will remove regions of high scatter or when Earth/Moon (EM) is in field of view\ndef momentumdump_removal(SectorNum,Camera,CCD,before_after_in_minutes, time):\n import numpy as np\n# print('mA: ',len(time))\n SectorNum=float(SectorNum)\n ### \n if SectorNum==1:\n #orbit start times\n t_0 = 1325.29278\n t_1 = 1338.52153\n mdumps=2.5\n jittermask = (time < 1347) | (time > 1350)\n ###\n if Camera==1:\n# print('go')\n EM_glintmask = (time < time[-1]) | (time > (time[-1]-2)) #last 2 days no good\n mask = (jittermask) & (EM_glintmask)\n else:\n mask = (jittermask) \n ### \n if SectorNum==2:\n mdumps=2.5\n #orbit start times\n t_0 = 1354.10102\n t_1 = 1368.59406\n ### \n #no jittermask for 2\n mask = np.ones(len(time), dtype=bool) #makes them all TRUE\n ### \n if SectorNum==3:\n mdumps=2.5\n #orbit start times\n t_0 = 1382.03987+mdumps #since it's cut out below\n t_1 = 1396.60497 \n jittermask = (time < 1382) | (time > 1385.9)\n jittermask2 = (time < 1395.2325) | (time > 1396.6)\n jittermask3 = (time < 1406.2)\n ###\n mask = (jittermask) & (jittermask2) & (jittermask3)\n ### \n if SectorNum==4:\n #orbit start times\n t_0 = 1410.89974\n t_1 = 1424.54897\n #momentem dump rate\n mdumps=2.5\n jittermask = (time > 1413) | (time < 1436.85)\n jittermask2 = (time < 1418.53691) | (time > 1424.5) #anomaly\n ### \n if Camera==1: \n EM_glintmask = (time < 1422.2297) | (time > 1423.502) \n EM_glintmask2 = (time < 1436.1047) | (time > 1436.8353) \n ###\n ### \n mask = (jittermask) & (jittermask2) & (EM_glintmask) & (EM_glintmask2)\n else:\n #combine masks\n mask = (jittermask) & (jittermask2) \n ### \n if SectorNum==5:\n mdumps=3.0\n #orbit start times\n t_0 = 1437.82566\n t_1 = 1451.54898 \n ### \n #no jittermask for Sector 5\n ### \n if Camera==1: \n EM_glintmask = (time < 1463.93945) | (time > 1464.25) \n mask = (np.ones(len(time), dtype=bool)) & (EM_glintmask)\n else:\n mask = np.ones(len(time), dtype=bool) #makes them all 
TRUE\n ### \n if SectorNum==6 or SectorNum==7:\n mdumps=3.125\n ### \n if SectorNum==6:\n #orbit start times\n t_0 = 1465.21262\n t_1 = 1478.11304\n if SectorNum==7:\n #orbit start times\n t_0 = 1491.62553\n t_1 = 1504.69775\n ### \n #no jittermask for Sector 6 OR 7\n mask = np.ones(len(time), dtype=bool) #makes them all TRUE\n ### \n if SectorNum==8:\n mdumps=3.125\n #orbit start times\n t_0 = 1517.34150\n t_1 = 1530.25816\n ### \n #no jittermask for Sector 8\n ### \n EM_glintmask = (time < 1516) | (time > 1517.75) #earth glint at start of orbit \n EM_glintmask2 = (time < 1529) | (time > 1536.2) #earth glint at start of orbit\n mask = (EM_glintmask) & (EM_glintmask2)\n ### \n if SectorNum==9:\n mdumps=3.125\n #orbit start times\n t_0 = 1543.21648\n t_1 = 1557.00080\n ### \n EM_glintmask = (time < 1542.21) | (time > 1544.0) #earth glint at start of orbit \n EM_glintmask2 = (time < 1556.0) | (time > 1557.75) #earth glint at start of orbit\n mask = (EM_glintmask) & (EM_glintmask2)\n ### \n if SectorNum==10:\n mdumps=3.125\n #orbit start times\n t_0 = 1569.43176\n t_1 = 1582.76231\n ### \n EM_glintmask = (time < (1569.43-1)) | (time > (1571.0)) #earth glint at start of orbit\n EM_glintmask2 = (time < (1582.76-1)) | (time > (1584.5)) #earth glint at start of orbit\n mask = (EM_glintmask) & (EM_glintmask2)\n ### \n if SectorNum==11:\n mdumps=3.125\n #orbit start times\n t_0 = 1596.77203\n t_1 = 1610.77620\n ### \n EM_glintmask = (time < (1596.77-1)) | (time > (1599)) #earth glint at start of orbit \n EM_glintmask2 = (time < (1610)) | (time > (1613.75)) #earth glint at start of orbit\n mask = (EM_glintmask) & (EM_glintmask2)\n ###\n if SectorNum==12:\n mdumps=3.125 \n #orbit start times\n t_0 = 1624.94979\n t_1 = 1640.03312\n ### \n EM_glintmask = (time < (1624.949-1)) | (time > (1624.949+0.75)) #earth glint at start of orbit \n EM_glintmask2 = (time < (1640.03-1)) | (time > (1640.03+0.75)) #earth glint at start of orbit\n mask = (EM_glintmask) & (EM_glintmask2)\n ###\n ### \n if SectorNum==13: \n mdumps=3.375\n #orbit start times\n t_0 = 1653.91505\n t_1 = 1668.61921\n ### \n jittermask = (time < 1665.2983) | (time > 1665.3501) \n EM_glintmask = (time < (1653.915-1)) | (time > (1653.915+0.75)) #earth glint at start of orbit\n EM_glintmask2 = (time < (1668.61903-1)) | (time > (1668.619+0.75)) #earth glint at start of orbit\n mask = (jittermask) & (EM_glintmask) & (EM_glintmask2)\n ###\n if SectorNum==14:\n mdumps=4.4\n #orbit start times\n t_0 = 1683.34838\n t_1 = 1697.33865 \n #no jittermask for Sector 14 (...yet)\n mask = np.ones(len(time), dtype=bool) #makes them all TRUE\n ### \n ### NEED TO ADD NORTH SECTORS!!!! 
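ALSO EXTENDED MISSION SECTORS!\n    ###\n    # Worked example of the exclusion window applied below (illustrative numbers):\n    # before_after_in_minutes=30 converts to 30/(60*24) ~ 0.0208 days on either\n    # side of a dump, so each momentum dump removes a ~0.042-day slice of data.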
\n    ###\n    if SectorNum==31:\n        # DRN says 1 mdump occurred half way in each orbit...\n        #orbit start times\n        t_0 = 2144.50927\n        t_0end = 2157.45371\n        t_1 = 2158.85648\n        t_1end = 2169.94398 \n        mdumps = 0\n        mask = np.ones(len(time), dtype=bool) #makes them all TRUE\n    \n    \n    ### Need a special case for Sector 31 (and maybe others...)\n    if SectorNum==31:\n        mask_mdump = np.ones_like(time, dtype=bool) \n        timedump1 = t_0 + (t_0end - t_0)/2 #halfway through orbit 1 like DRN says, as an absolute BTJD time\n        timedump2 = t_1 + (t_1end - t_1)/2 #halfway through orbit 2 like DRN says, as an absolute BTJD time\n        for t in range(len(time)):\n            if (time[t]>(timedump1-before_after_in_minutes/(60*24))) & (time[t]<(timedump1+before_after_in_minutes/(60*24))):\n                mask_mdump[t]=0\n            if (time[t]>(timedump2-before_after_in_minutes/(60*24))) & (time[t]<(timedump2+before_after_in_minutes/(60*24))):\n                mask_mdump[t]=0\n    else:\n        Num_mdumps = int(np.round((np.nanmax(time) - np.nanmin(time))/mdumps,2))+1\n        mask_mdump = np.ones_like(time, dtype=bool) \n        for N in range(Num_mdumps): \n            for t in range(len(time)):\n                #only flag dump times that fall inside the observed time span (reconstructed guard)\n                if t_0+(N)*mdumps < np.nanmax(time):\n                    timedump = t_0+(N)*mdumps\n                    if (time[t]>(timedump-before_after_in_minutes/(60*24))) & (time[t]<(timedump+before_after_in_minutes/(60*24))):\n                        mask_mdump[t]=0\n                if t_1+(N)*mdumps < np.nanmax(time):\n                    timedump = t_1+(N)*mdumps\n                    if (time[t]>(timedump-before_after_in_minutes/(60*24))) & (time[t]<(timedump+before_after_in_minutes/(60*24))):\n                        mask_mdump[t]=0\n    ###\n    ###\n    combo_mask = (mask_mdump) & (mask) \n    combo_mask2 = np.where(combo_mask==True)[0] #indices kept after throwing out momentum dumps and bad data\n    mask_mdump=combo_mask2\n    ###\n    ###\n    print('time before/after momentum dumps to remove: ',before_after_in_minutes, 'minutes')\n    print('before momentumdump removal: ',len(time)) \n    print('after: ',len(mask_mdump)) \n    return mask_mdump,mdumps,t_0,t_1 #kept indices, mdump rate, start of 1st/2nd orbits\n\n####Still on Step 4 here...\ndef Applying_Mdump_removal(ID,Sector,Camera,CCD,before_after_in_minutes,SAP_LC,flux,savelcpath,verbose):\n    import pandas as pd\n    ###\n    ### read in data from dataframe\n    rawtime = np.array(SAP_LC['Time'].to_list()) \n    sap_flux = np.array(SAP_LC['SAP Flux'].to_list()) \n    sap_error = np.array(SAP_LC['SAP Error'].to_list()) \n    bkg_flux = np.array(SAP_LC['Background Flux'].to_list()) \n    ###\n    ### making mask for momentum dumps and bad regions of data\n    mask_mdump,mdumps,t_0,t_1 = momentumdump_removal(Sector,Camera,CCD,before_after_in_minutes, rawtime)\n    ###\n    ### Applying mask and then saving masked data\n    time=rawtime[mask_mdump]\n    sap_flux=sap_flux[mask_mdump]\n    sap_error=sap_error[mask_mdump]\n    bkg_flux=bkg_flux[mask_mdump]\n    flux=flux[mask_mdump] # removing frames with momentum dumps\n    ###\n    if verbose==True:\n        print('clipped len check:', len(time),len(sap_flux),len(sap_error))\n    clippedRAWLC_df = pd.DataFrame({\"Time\":time, \"SAP Flux\": sap_flux, \"SAP Error\":sap_error,\"Background Flux\":bkg_flux})\n    ###\n    clippedRAWLC_df.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_RAW_LC_systematics_removed.txt\",index=False)\n    ###\n    if verbose==True:\n        print('len check b',' T', len(time),' SAP',len(sap_flux), ' E ', len(sap_error))\n    ###\n    return mask_mdump, mdumps,t_0,t_1, flux, SAP_LC, clippedRAWLC_df\n\n\n
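# A minimal usage sketch of the dump-masking step above (inputs are\n# hypothetical; SAP_LC and flux come from the earlier SAP() step, and the\n# value 30 discards data within +/-30 minutes of every momentum dump):\n#\n# mask_mdump, mdumps, t_0, t_1, flux, SAP_LC, clippedRAWLC_df = Applying_Mdump_removal(\n#     ID, Sector, Camera, CCD, 30, SAP_LC, flux, savelcpath, verbose=True)\n# time_clipped = np.array(clippedRAWLC_df['Time'])\n\n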
# helper functions for smoothing and outlier removal based on stellar parameters\n\n\ndef SMA_AU_from_Period_to_stellar(Period,R_star,M_star):\n    #assumes circular orbit\n    #using Kepler's third law, calculate SMA\n    #solar units\n    RS = 6.955*10.0**10.0 #cm, solar radius\n    MS = 1.989*10.0**33.0 #g, solar mass\n\n    G = 6.6743*10.0**(-8.0) #cm^3 per g per s^2\n\n    R = R_star*RS\n    M = M_star*MS\n    P=Period*24.0*60.0*60.0 #days to seconds\n\n    #SMA from Kepler's third law\n    SMA_cm = ((G*M*(P**2))/(4*(np.pi**2)))**(1/3) \n\n    #note R_star is already in solar units so we need to convert to cm using\n    # solar radius as a constant\n    Stellar_Radius = R #now in cm\n\n    SMA = SMA_cm / Stellar_Radius #now unitless (cm / cm)\n    return SMA, SMA_cm\n\ndef Tdur(Period, R_star,M_star, R_planet_RE): \n    RE = 6.378*10.0**8 #cm\n    RS = 6.955 *10.0**10 #cm \n    A = Period/np.pi #in days\n    \n    SMA_cm = SMA_AU_from_Period_to_stellar(Period,R_star,M_star)[1]\n    \n    B =(R_star*RS +R_planet_RE*RE)/ SMA_cm #unitless (cm / cm)\n    \n    T_dur = A*np.arcsin(B) #in days\n    return T_dur\n\ndef stellar_insolation(Ps, Ms, Rs, Teff):\n    '''Return the insolation, both absolute and in Earth units'''\n    #these are all cgs units: cm, g, s \n    G=6.6743*10**(-8)\n    Msun=1.989 *10**33\n    Mearth=5.974*10**27\n    Rsun=6.955*10**10\n    Rearth=6.378*10**8\n    AU=1.496*10**13\n    pc=3.086 *10**18\n    \n    S0 = 1367 #solar constant in W m^-2 (not used below)\n    \n    sigma = 5.67e-8 #Stefan-Boltzmann constant (SI); the mixed units cancel in the Earth-normalized ratio\n\n    L = 4*np.pi*(Rs*Rsun)**2 * sigma*Teff**4\n    LE = 4*np.pi*(1*Rsun)**2 * sigma*5777**4\n    \n    Ps = np.ascontiguousarray(Ps)\n    \n    smas,smas_cm = SMA_AU_from_Period_to_stellar(Ps,Rs,Ms)\n    smasE,smasE_cm = SMA_AU_from_Period_to_stellar(365.25,1,1)\n    \n    S = L / (4*np.pi*smas_cm**2)\n    SE = LE / (4*np.pi*smasE_cm**2)\n    \n    S_in_earth_units = S/SE\n    \n    return S, S_in_earth_units\n\n\ndef BWMC_auto(ID,Sector,input_LC,savelcpath): #bt = break tolerance, pipeline uses window_size/2.0\n    from wotan import flatten \n    import numpy as np\n    import pandas as pd\n    import requests\n    import time as clock #local imports so the retry logic below is self-contained\n    \n    #read in LC data\n    time = np.array(input_LC['Time'].to_list())\n    flux_raw = np.array(input_LC['Flux'].to_list())\n    flux_error = np.array(input_LC['Error'].to_list())\n    flux_model = np.array(input_LC['Model'].to_list()) \n    centx = np.array(input_LC[\"Centroid X Positions\"].to_list())\n    centy = np.array(input_LC[\"Centroid Y Positions\"].to_list())\n    sap_flux=np.array(input_LC['SAP Flux'].to_list())\n    sap_error=np.array(input_LC['SAP Error'].to_list())\n    \n    LCDur=(np.nanmax(time) - np.nanmin(time))\n    maxP = LCDur/2 #longest period showing 2 transits in a light curve (~14 days for TESS single sector LCs)\n    R_planet_RE = 1\n    \n    #getting stellar parameters from TIC\n    from transitleastsquares import catalog_info\n    \n    try:\n        qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n    except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:\n        clock.sleep(5) #pause 5 seconds then try again\n        qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n    \n    # we want to keep the longest transit for an Earth-like planet for a single sector of data\n    # using stellar parameters to determine transit duration\n    window_size = 3*Tdur(maxP, R_star,M_star, R_planet_RE)\n    \n    flatten_lc, trend_lc = flatten(time, flux_raw, window_length=window_size, return_trend=True, break_tolerance=window_size/2.0,method='biweight',robust=True)\n    T=time\n    F=flatten_lc\n    FE=flux_error\n    #\n    #checking for NaNs\n    nanmask = np.where(np.isfinite(F)==True)[0]\n    T = T[nanmask]\n    F = F[nanmask]\n    FE =FE[nanmask]\n    F_raw = flux_raw[nanmask]\n    flux_model = flux_model[nanmask]\n    trend_lc=trend_lc[nanmask]\n    centx=centx[nanmask]\n    centy=centy[nanmask] \n    sap_flux=sap_flux[nanmask]\n    sap_error=sap_error[nanmask] \n    #\n    #redefining output terms\n    time = T\n    flux_detrended = F\n    det_error = FE/trend_lc\n    ###\n    \n    Det_LC = pd.DataFrame({'Time':time, 'SAP Flux':sap_flux,'SAP 
Error':sap_error,'PLD Flux':F_raw,'PLD Error':FE,'PLD Model':flux_model,'Detrended Flux':flux_detrended, 'Detrended Error':det_error,'Fitted Trend':trend_lc,\"Centroid X Positions\":centx,\"Centroid Y Positions\":centy})\n    Det_LC.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_DET_LC.txt\")\n    return Det_LC, nanmask\n\n\n#helper function for PLD modeler\ndef solver(X,flux):\n    import warnings,sys,os\n    warnings.simplefilter(\"ignore\", category=PendingDeprecationWarning)\n    if not sys.warnoptions:\n        warnings.simplefilter(\"ignore\")\n        os.environ[\"PYTHONWARNINGS\"] = \"ignore\" # Also affect subprocesses\n    import numpy as np\n    from numpy.linalg import LinAlgError\n    a=np.dot(X.T,X)\n    b=np.dot(X.T,flux)\n    try:\n        w=np.linalg.solve(a,b) \n        model=np.dot(X,w)\n    except LinAlgError:\n        print('np.linalg.solve gave Singular Matrix problem. Using np.linalg.lstsq')\n        w=np.linalg.lstsq(a, b, rcond=None)[0]\n        model=np.dot(X,w) \n    return model\n\n\ndef PLD_model_old(ID,Sector,flux,pix_mask,input_LC,savelcpath,pld_order=3, n_pca_terms=3):\n    import warnings,sys,os\n    warnings.simplefilter(\"ignore\", category=PendingDeprecationWarning)\n    if not sys.warnoptions:\n        warnings.simplefilter(\"ignore\")\n        os.environ[\"PYTHONWARNINGS\"] = \"ignore\" # Also affect subprocesses\n    import numpy as np\n    import pandas as pd\n    from numpy.linalg import LinAlgError\n    from sklearn.decomposition import PCA\n    from itertools import combinations_with_replacement as CwR\n    \n    #read in LC data:\n    time = np.array(input_LC['Time'])\n    input_flux = np.array(input_LC['Flux'])\n    error = np.array(input_LC['Error'])\n    centx = np.array(input_LC[\"Centroid X Positions\"])\n    centy = np.array(input_LC[\"Centroid Y Positions\"]) \n    \n    aperture = [pix_mask for i in range(len(flux))]\n    fpix_rs = (flux*aperture).reshape(len(flux),-1)\n    fpix_ap = np.zeros((len(flux),len(np.delete(fpix_rs[0],np.where(np.isnan(fpix_rs[0]))))))\n\n    for c in range(len(fpix_rs)):\n        naninds = np.where(np.isnan(fpix_rs[c]))\n        fpix_ap[c] = np.delete(fpix_rs[c],naninds)\n\n    newflux = np.sum(fpix_ap,axis=1)\n    newX = fpix_ap / newflux.reshape(-1,1)\n    \n    # 1st order PLD\n    f1 = fpix_ap / newflux.reshape(-1,1)\n    pca = PCA(n_components = n_pca_terms)\n    X1 = pca.fit_transform(f1)\n    \n    # Nth order PLD\n    XN=[]\n    for order in range(2,pld_order+1):\n        fN = np.prod(list(CwR(X1.T, order)), axis=1).T\n        pca = PCA(n_components = n_pca_terms)\n        X_n = pca.fit_transform(fN)\n        XN.append(X_n)\n\n    X_pld = np.concatenate(XN, axis=1)\n    \n    pld_model = solver(X_pld , input_flux)+ np.nanmedian(input_flux)\n    pld_detrended = input_flux/pld_model \n    pld_error = error/pld_model\n\n    nanmask = np.where(np.isfinite(pld_detrended)==True)[0] #make nanmask on last step (PLD/DET/SAP)\n    time = time[nanmask]\n    input_flux = input_flux[nanmask]\n    error = error[nanmask]\n    pld_detrended = pld_detrended[nanmask]\n    pld_error =pld_error[nanmask]\n    pld_model = pld_model[nanmask]\n    centx = centx[nanmask]\n    centy = centy[nanmask] \n\n    PLD_LC = pd.DataFrame({'Time':time, 'SAP Flux':input_flux,'SAP Error':error,\\\n                           'PLD Flux':pld_detrended, 'PLD Error': pld_error,\\\n                           'PLD Model':pld_model, 'Centroid X Positions':centx,\\\n                           'Centroid Y Positions':centy})\n    PLD_LC.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_PLD_LC.txt\")\n    return PLD_LC\n\n\n
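# A toy sketch of the first-order PLD regressors that both PLD\n# implementations below construct (illustrative only; fpix stands in for\n# the per-cadence pixel fluxes inside the aperture):\n#\n# import numpy as np\n# fpix = np.array([[100., 50., 25.],   # cadence 1\n#                  [ 90., 55., 30.]])  # cadence 2\n# X1 = fpix / fpix.sum(axis=1)[:, None]  # each row sums to 1, so X1 traces\n#                                        # flux redistribution from pointing\n#                                        # jitter, not real brightness changes\n\n\n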
def PLD_model(ID,Sector,flux,pix_mask,input_LC,savelcpath,pld_order=3, n_pca_terms=3,cadence_mask=None):\n    import warnings,sys,os\n    warnings.simplefilter(\"ignore\", category=PendingDeprecationWarning)\n    if not sys.warnoptions:\n        warnings.simplefilter(\"ignore\")\n        os.environ[\"PYTHONWARNINGS\"] = \"ignore\" # Also affect subprocesses\n    \n    from numpy.linalg import LinAlgError\n    from itertools import combinations_with_replacement as multichoose\n    import numpy as np\n    import pandas as pd\n    \n    pld_pixel_mask = pix_mask\n\n    time = np.array(input_LC['Time'])\n    input_flux = np.array(input_LC['Flux'])\n    error = np.array(input_LC['Error'])\n    centx = np.array(input_LC[\"Centroid X Positions\"])\n    centy = np.array(input_LC[\"Centroid Y Positions\"]) \n\n    # create nan mask\n    rawflux=input_flux\n    rawflux_err=error\n    nanmask = np.isfinite(time)\n    nanmask &= np.isfinite(rawflux)\n    nanmask &= np.isfinite(rawflux_err)\n    nanmask &= np.abs(rawflux_err) > 1e-12\n\n    # mask out nan values\n    rawflux = rawflux[nanmask]\n    rawflux_err = rawflux_err[nanmask]\n    flux = flux[nanmask]\n    time = time[nanmask]\n    centx = centx[nanmask] #keep centroids aligned with the masked arrays\n    centy = centy[nanmask]\n\n    # find pixel bounds of aperture on cutout\n    xmin, xmax = min(np.where(pld_pixel_mask)[0]), max(np.where(pld_pixel_mask)[0])\n    ymin, ymax = min(np.where(pld_pixel_mask)[1]), max(np.where(pld_pixel_mask)[1])\n\n    # crop data cube to include only desired pixels\n    # this is required for superstamps to ensure matrix is invertable\n    flux_crop = flux[:, xmin:xmax+1, ymin:ymax+1]\n    aperture_crop = pld_pixel_mask[xmin:xmax+1, ymin:ymax+1]\n\n    # first order PLD design matrix\n    pld_flux = flux_crop[:, aperture_crop]\n    f1 = np.reshape(pld_flux, (len(pld_flux), -1))\n    X1 = f1 / np.nansum(pld_flux, axis=-1)[:, None]\n    # No NaN pixels\n    X1 = X1[:, np.isfinite(X1).all(axis=0)]\n\n    # higher order PLD design matrices\n    X_sections = [np.ones((len(flux_crop), 1)), X1]\n    for i in range(2, pld_order+1):\n        f2 = np.prod(list(multichoose(X1.T, i)), axis=1).T #i-th order products of the first-order regressors\n        try:\n            # We use an optional dependency for very fast PCA (fbpca).\n            # If the import fails we fall back on the slower `np.linalg.svd`.\n            from fbpca import pca\n            components, _, _ = pca(f2, n_pca_terms)\n        except ImportError:\n            print(\"PLD uses the `fbpca` package. You can pip install \"\n                  \"with `pip install fbpca`. Using `np.linalg.svd` instead.\")\n            components, _, _ = np.linalg.svd(f2)\n        except AssertionError as ae:\n            print(ae)\n            print('problem with fbpca, using np.linalg.svd instead')\n            print('')\n            components, _, _ = np.linalg.svd(f2)\n        X_n = components[:, :n_pca_terms]\n        X_sections.append(X_n)\n\n    # Create the design matrix X by stacking X1 and higher order components, and\n    # adding a column vector of 1s for numerical stability (see Luger et al.).\n    # X has shape (n_cadences, 1 + n_components_first + n_components_higher_order)\n    X = np.concatenate(X_sections, axis=1)\n\n    # set default transit mask\n    if cadence_mask is None:\n        cadence_mask = np.ones_like(time, dtype=bool)\n    M = lambda x: x[cadence_mask]\n\n    # mask transits in design matrix (if requested by user)\n    MX = M(X)\n\n    # compute the coefficients C on the basis vectors;\n    # the PLD design matrix will be dotted with C to solve for the noise model.\n    ivar = 1.0 / M(rawflux_err)**2 # inverse variance\n    A = np.dot(MX.T, MX * ivar[:, None])\n    B = np.dot(MX.T, M(rawflux) * ivar)\n\n    # apply prior to design matrix weights for numerical stability\n    A[np.diag_indices_from(A)] += 1e-8\n    ### \n    try:\n        C=np.linalg.solve(A,B) \n    except LinAlgError:\n        print('np.linalg.solve gave Singular Matrix problem. Using np.linalg.lstsq')\n        C=np.linalg.lstsq(A,B,rcond=None)[0]\n    ###\n    # compute detrended light curve\n    pld_model = np.dot(X, C)\n    pld_detrended = rawflux - (pld_model)+ np.nanmedian(rawflux) #additive correction, as used in Old LK\n    pld_error = rawflux_err\n    \n#    pld_detrended = rawflux/pld_model + np.nanmedian(rawflux)\n#    pld_error = rawflux_err/pld_model\n    \n    nanmask = np.where(np.isfinite(pld_detrended)==True)[0] #make nanmask on last step (PLD/DET/SAP)\n    time = time[nanmask]\n    input_flux = rawflux[nanmask]\n    error = rawflux_err[nanmask]\n    pld_detrended = pld_detrended[nanmask]\n    pld_error =pld_error[nanmask]\n    pld_model = pld_model[nanmask]\n    centx = centx[nanmask]\n    centy = centy[nanmask] \n    \n    print('PLD len check: T',len(time),' SAPF ',len(rawflux),' SAPE ',len(rawflux_err),' PLD M ',len(pld_model),' PLD F', len(pld_detrended),' PLD E ',len(pld_error),' centx ',len(centx),' centy ',len(centy) )\n    PLD_LC = pd.DataFrame({'Time':time, 'SAP Flux':input_flux,'SAP Error':error,\\\n                           'PLD Flux':pld_detrended, 'PLD Error': pld_error,\\\n                           'PLD Model':pld_model, 'Centroid X Positions':centx,\\\n                           'Centroid Y Positions':centy})\n    PLD_LC.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_PLD_LC.txt\")\n    return PLD_LC\n\n\ndef singleoutliers(data, stepsize=1):\n    single=np.split(data, np.where(np.diff(data) != stepsize)[0]+1)\n    single = np.array(list(filter(lambda x : len(x) <= 1, single))).flatten() \n    single = np.sort(np.array([np.int64(x) for x in single]))\n    return single\n\ndef consecutive(data, stepsize=1):\n    consec=np.split(data, np.where(np.diff(data) != stepsize)[0]+1)\n    c=list(filter(lambda x : len(x) > 1, consec))\n    newc=[]\n    for x in range(len(c)):\n        newc=np.append(newc,c[x])\n    consec = np.sort(np.array([np.int64(x) for x in newc]))\n    return consec\n\n\n
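# Quick illustration of the two index helpers above (toy values):\n# for flagged indices np.array([2, 5, 6, 7, 11]),\n#   singleoutliers(...) -> array([ 2, 11])   isolated points, treated as noise\n#   consecutive(...)    -> array([5, 6, 7])  a run of low points, kept because\n#                                            it may be part of a real transit\n\n\n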
def outlier_removal_old(ID,Sector,input_LC, remove_outliers, window_size_in_days, Nsigma_low,Nsigma_high,method,savelcpath,verbose): \n    import numpy as np\n    import pandas as pd\n    from scipy import stats\n    ###\n    #needs to be flexible to allow different combos of PLD, DET and SAP \n    #read in input data\n    time = np.array(input_LC['Time'])\n    sap_flux = np.array(input_LC['SAP Flux'])\n    sap_error= np.array(input_LC['SAP Error'])\n    det_flux = np.array(input_LC['Detrended Flux'])\n    det_error = np.array(input_LC['Detrended Error'])\n    trend_lc = np.array(input_LC['Fitted Trend'])\n    pld_flux = np.array(input_LC['PLD Flux'])\n    pld_error = np.array(input_LC['PLD Error'])\n    pld_model = np.array(input_LC['PLD Model']) \n    centx = np.array(input_LC['Centroid X Positions']) \n    centy = np.array(input_LC['Centroid Y Positions']) \n    \n    preclipLC_df = pd.DataFrame({\"Time\":time, \"SAP Flux\": sap_flux, \"SAP Error\":sap_error, \"Detrended Flux\":det_flux, \"Detrended Error\":det_error,\"Fitted Trend\":trend_lc,\"PLD Flux\":pld_flux,\"PLD Error\":pld_error, \"PLD Model\":pld_model,\"Centroid X Positions\":centx,\"Centroid Y Positions\":centy})\n    preclipLC_df.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_PLD_LC_preclipped.txt\",index=False)\n    ###\n    #initialize the flagged-point outputs so the remove_outliers=='no' path can still return them\n    good_ind_DF = pd.DataFrame()\n    bad_ind_DF = pd.DataFrame()\n    ### \n    if remove_outliers=='yes':\n        #needs to be flexible to allow different combos of PLD, DET and SAP \n        # use last step in operation's flux (SAP, DET or PLD)\n        flux = pld_flux\n        error = pld_error\n        ###\n        if method=='global':\n            #global noise level\n            flux_threshold_lo = np.nanmedian(flux)-Nsigma_low*stats.median_absolute_deviation(flux)\n            flux_threshold_hi = np.nanmedian(flux)+Nsigma_high*stats.median_absolute_deviation(flux) #upper clip now uses Nsigma_high\n        ###\n        cad = np.nanmedian(np.diff(time))\n        Npts = int(np.round(((window_size_in_days/cad))+1,1))\n        size=len(time)\n        window_size=Npts\n        if window_size > size:\n            window_size = size\n        bins = int(size/Npts)+2\n        good_ind_lo=[]\n        bad_ind_lo=[]\n        good_ind_hi=[]\n        bad_ind_hi=[]\n        for N in range(1,bins): \n            time_in_window = time[window_size*(N-1):N*window_size]\n            flux_in_window = flux[window_size*(N-1):N*window_size]\n            err_in_window = error[window_size*(N-1):N*window_size]\n            if method==\"local\":\n                #local noise\n                flux_threshold_lo = np.nanmedian(flux_in_window)-Nsigma_low*stats.median_absolute_deviation(flux_in_window)\n                flux_threshold_hi = np.nanmedian(flux_in_window)+Nsigma_high*stats.median_absolute_deviation(flux_in_window) #upper clip now uses Nsigma_high\n            ###\n            ind_lo = np.where(flux_in_window < flux_threshold_lo)[0]\n            ind_hi = np.where(flux_in_window > flux_threshold_hi)[0] \n            ###\n            single_lo = singleoutliers(ind_lo)\n            single_hi = singleoutliers(ind_hi)\n            ###\n            consec_lo = consecutive(ind_lo)\n            consec_hi = consecutive(ind_hi)\n            ###\n            #map elements identified in window to full input array\n            if len(consec_hi)>0:\n                good_ind_hi_temp = np.searchsorted(np.around(time,4), np.around(time_in_window[consec_hi],4),side='left')\n                good_ind_hi=np.append(good_ind_hi,good_ind_hi_temp) \n            if len(single_hi)>0:\n                bad_ind_hi_temp = np.searchsorted(np.around(time,4), np.around(time_in_window[single_hi],4),side='left')\n                bad_ind_hi=np.append(bad_ind_hi,bad_ind_hi_temp)\n            ###\n            if len(consec_lo)>0:\n                good_ind_lo_temp = np.searchsorted(np.around(time,4), np.around(time_in_window[consec_lo],4),side='left')\n                good_ind_lo=np.append(good_ind_lo,good_ind_lo_temp) \n            if len(single_lo)>0:\n                bad_ind_lo_temp = np.searchsorted(np.around(time,4), np.around(time_in_window[single_lo],4),side='left')\n                bad_ind_lo=np.append(bad_ind_lo,bad_ind_lo_temp)\n        ###\n        #convert matched elements to integers\n        bad_ind_lo=np.sort(np.array([np.int64(x) for x in bad_ind_lo]))\n        bad_ind_hi=np.sort(np.array([np.int64(x) for 
x in bad_ind_hi]))\n good_ind_lo=np.sort(np.array([np.int64(x) for x in good_ind_lo]))\n good_ind_hi=np.sort(np.array([np.int64(x) for x in good_ind_hi])) \n ###\n ###\n ###\n ### append both high/low results to final arrays\n bad_ind = np.sort(np.append(bad_ind_lo,bad_ind_hi).flatten())\n # good_ind = np.sort(np.append(good_ind_lo,good_ind_hi).flatten())\n ###\n ### MAAAAAYBE we don't want to keep positive outliers after all...\n good_ind = np.sort((good_ind_lo).flatten())\n temp_bad_ind = np.append(bad_ind_lo,bad_ind_hi)\n bad_ind = np.sort(np.append(temp_bad_ind,good_ind_hi).flatten())\n ###\n ###\n bad_ind=np.sort(np.array([np.int64(x) for x in bad_ind])) \n good_ind=np.sort(np.array([np.int64(x) for x in good_ind])) \n ###\n #convert matched elements to intergers\n ###\n ###\n #finally, remove bad points (keep good points) and make new time,flux, error arrays\n ###\n badtime = np.array([i for j, i in enumerate(time) if j in bad_ind])\n badsap_flux = np.array([i for j, i in enumerate(sap_flux) if j in bad_ind])\n baderror = np.array([i for j, i in enumerate(sap_error) if j in bad_ind])\n badflux_detrended = np.array([i for j, i in enumerate(det_flux) if j in bad_ind])\n baddet_error = np.array([i for j, i in enumerate(det_error) if j in bad_ind])\n badtrend_lc = np.array([i for j, i in enumerate(trend_lc) if j in bad_ind])\n badpld_detrended = np.array([i for j, i in enumerate(pld_flux) if j in bad_ind])\n badpld_model = np.array([i for j, i in enumerate(pld_model) if j in bad_ind])\n badpld_error = np.array([i for j, i in enumerate(pld_error) if j in bad_ind])\n badcentx= np.array([i for j, i in enumerate(centx) if j in bad_ind])\n badcenty= np.array([i for j, i in enumerate(centy) if j in bad_ind]) \n \n goodtime = np.array([i for j, i in enumerate(time) if j in good_ind])\n goodsap_flux = np.array([i for j, i in enumerate(sap_flux) if j in good_ind])\n gooderror = np.array([i for j, i in enumerate(sap_error) if j in good_ind])\n goodflux_detrended = np.array([i for j, i in enumerate(det_flux) if j in good_ind])\n gooddet_error = np.array([i for j, i in enumerate(det_error) if j in good_ind])\n goodtrend_lc = np.array([i for j, i in enumerate(trend_lc) if j in good_ind])\n goodpld_detrended = np.array([i for j, i in enumerate(pld_flux) if j in good_ind])\n goodpld_model = np.array([i for j, i in enumerate(pld_model) if j in good_ind])\n goodpld_error = np.array([i for j, i in enumerate(pld_error) if j in good_ind])\n goodcentx= np.array([i for j, i in enumerate(centx) if j in good_ind])\n goodcenty= np.array([i for j, i in enumerate(centy) if j in good_ind])\n \n \n \n time = np.array([i for j, i in enumerate(time) if j not in bad_ind])\n sap_flux = np.array([i for j, i in enumerate(sap_flux) if j not in bad_ind])\n sap_error = np.array([i for j, i in enumerate(sap_error) if j not in bad_ind])\n det_flux = np.array([i for j, i in enumerate(det_flux) if j not in bad_ind])\n det_error = np.array([i for j, i in enumerate(det_error) if j not in bad_ind])\n trend_lc = np.array([i for j, i in enumerate(trend_lc) if j not in bad_ind])\n pld_flux = np.array([i for j, i in enumerate(pld_flux) if j not in bad_ind])\n pld_error = np.array([i for j, i in enumerate(pld_error) if j not in bad_ind])\n pld_model = np.array([i for j, i in enumerate(pld_model) if j not in bad_ind]) \n centx = np.array([i for j, i in enumerate(centx) if j not in bad_ind]) \n centy = np.array([i for j, i in enumerate(centy) if j not in bad_ind]) \n ###\n ###\n ###\n import pandas as pd\n #saving flagged data points\n 
print('saving flagged outliers')\n bad_ind_DF = pd.DataFrame({\"Time\":badtime, \"SAP Flux\": badsap_flux, \"SAP Error\":baderror, \"Detrended Flux\":badflux_detrended, \"Detrended Error\":baddet_error,\"Fitted Trend\":badtrend_lc,\"PLD Flux\":badpld_detrended,\"PLD Model\":badpld_model,\"PLD Error\":badpld_error,\"Centroid X Positions\":badcentx,\"Centroid Y Positions\":badcenty})\n good_ind_DF = pd.DataFrame({\"Time\":goodtime, \"SAP Flux\": goodsap_flux, \"SAP Error\":gooderror, \"Detrended Flux\":goodflux_detrended, \"Detrended Error\":gooddet_error,\"Fitted Trend\":goodtrend_lc,\"PLD Flux\":goodpld_detrended,\"PLD Model\":goodpld_model,\"PLD Error\":goodpld_error,\"Centroid X Positions\":goodcentx,\"Centroid Y Positions\":goodcenty})\n bad_ind_DF.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_bad_outliers.txt\",index=False)\n good_ind_DF.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_good_outliers.txt\",index=False)\n ###\n ###\n ###\n if verbose==True:\n print('after outlier removal: ',' T', len(time),' Det',len(det_flux), ' trend',len(trend_lc), 'PLD model',len(pld_model),' PLD F',len(pld_flux),' SAP E', len(sap_error))\n ###\n if remove_outliers=='no':\n time = np.ascontiguousarray(time, dtype=np.float64)\n sap_flux = np.ascontiguousarray(sap_flux, dtype=np.float64)\n sap_error = np.ascontiguousarray(sap_error, dtype=np.float64)\n det_flux = np.ascontiguousarray(det_flux, dtype=np.float64)\n det_error= np.ascontiguousarray(det_error, dtype=np.float64)\n trend_lc = np.ascontiguousarray(trend_lc, dtype=np.float64) \n pld_flux = np.ascontiguousarray(pld_flux, dtype=np.float64) \n pld_error = np.ascontiguousarray(pld_error, dtype=np.float64)\n pld_model = np.ascontiguousarray(pld_model, dtype=np.float64)\n centx = np.ascontiguousarray(centx, dtype=np.float64)\n centy = np.ascontiguousarray(centy, dtype=np.float64) \n if verbose==True:\n print('skipping outlier removal:',' T', len(time),' Det',len(det_flux), ' trend',len(trend_lc), 'PLD model',len(pld_model),' PLD F',len(pld_flux),' SAP E', len(sap_error)) \n ###\n ###\n ### \n nanmask = np.where(np.isfinite(pld_flux)==True)[0]\n time = time[nanmask]\n sap_flux = sap_flux[nanmask]\n sap_error=sap_error[nanmask]\n det_flux = det_flux[nanmask]\n det_error=det_error[nanmask]\n trend_lc=trend_lc[nanmask] \n pld_flux = pld_flux[nanmask] \n pld_error=pld_error[nanmask]\n pld_model=pld_model[nanmask]\n centx=centx[nanmask]\n centy=centy[nanmask] \n \n \n #needs to be flexible to allow different combos of PLD, DET and SAP \n LC_df = pd.DataFrame({\"Time\":time, \"SAP Flux\": sap_flux, \"SAP Error\":sap_error, \"Detrended Flux\":det_flux, \"Detrended Error\":det_error,\"Fitted Trend\":trend_lc,\"PLD Flux\":pld_flux,\"PLD Error\":pld_error,\"PLD Model\":pld_model,\"Centroid X Positions\":centx,\"Centroid Y Positions\":centy})\n ###\n LC_df.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_final_LC.txt\",index=False)\n ###\n ###\n return LC_df, good_ind_DF, bad_ind_DF, preclipLC_df\n\n\n\ndef outlier_removal(ID,Sector,input_LC, remove_outliers, Nsigma_low,Nsigma_high,savelcpath,verbose,method='local',window_size_in_days=None): \n import numpy as np\n import pandas as pd\n from scipy import stats\n ###\n ###\n \n #needs to be flexible to allow different combos of PLD, DET and SAP \n #read in input data\n time = np.array(input_LC['Time'])\n sap_flux = np.array(input_LC['SAP Flux'])\n sap_error= np.array(input_LC['SAP Error'])\n det_flux = np.array(input_LC['Detrended Flux'])\n det_error = 
np.array(input_LC['Detrended Error'])\n    trend_lc = np.array(input_LC['Fitted Trend'])\n    pld_flux = np.array(input_LC['PLD Flux'])\n    pld_error = np.array(input_LC['PLD Error'])\n    pld_model = np.array(input_LC['PLD Model']) \n    centx = np.array(input_LC['Centroid X Positions']) \n    centy = np.array(input_LC['Centroid Y Positions']) \n    \n    preclipLC_df = pd.DataFrame({\"Time\":time, \"SAP Flux\": sap_flux, \"SAP Error\":sap_error, \"Detrended Flux\":det_flux, \"Detrended Error\":det_error,\"Fitted Trend\":trend_lc,\"PLD Flux\":pld_flux,\"PLD Error\":pld_error, \"PLD Model\":pld_model,\"Centroid X Positions\":centx,\"Centroid Y Positions\":centy})\n    preclipLC_df.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_PLD_LC_preclipped.txt\",index=False)\n    ###\n    #initialize the flagged-point outputs so the remove_outliers=='no' path can still return them\n    good_ind_DF = pd.DataFrame()\n    bad_ind_DF = pd.DataFrame()\n    ### \n    \n    #defining window for removing outliers in local noise\n    LCDur=(np.nanmax(time) - np.nanmin(time))\n    maxP = LCDur/2 #longest period showing 2 transits in a light curve (~14 days for TESS single sector LCs)\n    R_planet_RE = 1\n    \n    #getting stellar parameters from TIC\n    from transitleastsquares import catalog_info\n    import requests\n    import time as clock #local imports so the retry below is self-contained\n    try:\n        qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n    except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:\n        clock.sleep(5) #pause 5 seconds then try again\n        qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n    \n    if window_size_in_days is None:\n        # we want to keep the longest transit for an Earth-like planet for a single sector of data\n        # using stellar parameters to determine transit duration\n        window_size_in_days = 3*Tdur(maxP, R_star,M_star, R_planet_RE)\n    \n    if remove_outliers=='yes':\n        #needs to be flexible to allow different combos of PLD, DET and SAP \n        # use last step in operation's flux (SAP, DET or PLD)\n        flux = pld_flux\n        error = pld_error\n        ###\n        if method=='global':\n            #global noise level\n            flux_threshold_lo = np.nanmedian(flux)-Nsigma_low*stats.median_absolute_deviation(flux)\n            flux_threshold_hi = np.nanmedian(flux)+Nsigma_high*stats.median_absolute_deviation(flux) #upper clip now uses Nsigma_high\n        ###\n        cad = np.nanmedian(np.diff(time))\n        Npts = int(np.round(((window_size_in_days/cad))+1,1))\n        size=len(time)\n        window_size=Npts\n        if window_size > size:\n            window_size = size\n        bins = int(size/Npts)+2\n        good_ind_lo=[]\n        bad_ind_lo=[]\n        good_ind_hi=[]\n        bad_ind_hi=[]\n        for N in range(1,bins): \n            time_in_window = time[window_size*(N-1):N*window_size]\n            flux_in_window = flux[window_size*(N-1):N*window_size]\n            err_in_window = error[window_size*(N-1):N*window_size]\n            if method==\"local\":\n                #local noise\n                flux_threshold_lo = np.nanmedian(flux_in_window)-Nsigma_low*stats.median_absolute_deviation(flux_in_window)\n                flux_threshold_hi = np.nanmedian(flux_in_window)+Nsigma_high*stats.median_absolute_deviation(flux_in_window) #upper clip now uses Nsigma_high\n            ###\n            ind_lo = np.where(flux_in_window < flux_threshold_lo)[0]\n            ind_hi = np.where(flux_in_window > flux_threshold_hi)[0] \n            ###\n            single_lo = singleoutliers(ind_lo)\n            single_hi = singleoutliers(ind_hi)\n            ###\n            consec_lo = consecutive(ind_lo)\n            consec_hi = consecutive(ind_hi)\n            ###\n
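            # Design note (clarifying the split below): isolated flagged\n            # cadences are dropped as noise, while consecutive runs of flagged\n            # cadences are kept, since a real transit produces several low\n            # points in a row.\n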
            #map elements identified in window to full input array\n            if len(consec_hi)>0:\n                good_ind_hi_temp = np.searchsorted(np.around(time,4), np.around(time_in_window[consec_hi],4),side='left')\n                good_ind_hi=np.append(good_ind_hi,good_ind_hi_temp) \n            if len(single_hi)>0:\n                bad_ind_hi_temp = np.searchsorted(np.around(time,4), np.around(time_in_window[single_hi],4),side='left')\n                bad_ind_hi=np.append(bad_ind_hi,bad_ind_hi_temp)\n            ###\n            if len(consec_lo)>0:\n                good_ind_lo_temp = np.searchsorted(np.around(time,4), np.around(time_in_window[consec_lo],4),side='left')\n                good_ind_lo=np.append(good_ind_lo,good_ind_lo_temp) \n            if len(single_lo)>0:\n                bad_ind_lo_temp = np.searchsorted(np.around(time,4), np.around(time_in_window[single_lo],4),side='left')\n                bad_ind_lo=np.append(bad_ind_lo,bad_ind_lo_temp)\n        ###\n        #convert matched elements to integers\n        bad_ind_lo=np.sort(np.array([np.int64(x) for x in bad_ind_lo]))\n        bad_ind_hi=np.sort(np.array([np.int64(x) for x in bad_ind_hi]))\n        good_ind_lo=np.sort(np.array([np.int64(x) for x in good_ind_lo]))\n        good_ind_hi=np.sort(np.array([np.int64(x) for x in good_ind_hi])) \n        ###\n        ### append both high/low results to final arrays;\n        ### positive (high) outliers are not kept after all: consecutive high\n        ### points go into the bad list along with all of the single outliers\n        good_ind = np.sort((good_ind_lo).flatten())\n        temp_bad_ind = np.append(bad_ind_lo,bad_ind_hi)\n        bad_ind = np.sort(np.append(temp_bad_ind,good_ind_hi).flatten())\n        ###\n        bad_ind=np.sort(np.array([np.int64(x) for x in bad_ind])) \n        good_ind=np.sort(np.array([np.int64(x) for x in good_ind])) \n        ###\n        #finally, remove bad points (keep good points) and make new time,flux, error arrays\n        ###\n        badtime = np.array([i for j, i in enumerate(time) if j in bad_ind])\n        badsap_flux = np.array([i for j, i in enumerate(sap_flux) if j in bad_ind])\n        baderror = np.array([i for j, i in enumerate(sap_error) if j in bad_ind])\n        badflux_detrended = np.array([i for j, i in enumerate(det_flux) if j in bad_ind])\n        baddet_error = np.array([i for j, i in enumerate(det_error) if j in bad_ind])\n        badtrend_lc = np.array([i for j, i in enumerate(trend_lc) if j in bad_ind])\n        badpld_detrended = np.array([i for j, i in enumerate(pld_flux) if j in bad_ind])\n        badpld_model = np.array([i for j, i in enumerate(pld_model) if j in bad_ind])\n        badpld_error = np.array([i for j, i in enumerate(pld_error) if j in bad_ind])\n        badcentx= np.array([i for j, i in enumerate(centx) if j in bad_ind])\n        badcenty= np.array([i for j, i in enumerate(centy) if j in bad_ind]) \n        \n        goodtime = np.array([i for j, i in enumerate(time) if j in good_ind])\n        goodsap_flux = np.array([i for j, i in enumerate(sap_flux) if j in good_ind])\n        gooderror = np.array([i for j, i in enumerate(sap_error) if j in good_ind])\n        goodflux_detrended = np.array([i for j, i in enumerate(det_flux) if j in good_ind])\n        gooddet_error = np.array([i for j, i in enumerate(det_error) if j in good_ind])\n        goodtrend_lc = np.array([i for j, i in enumerate(trend_lc) if j in good_ind])\n        goodpld_detrended = np.array([i for j, i in enumerate(pld_flux) if j in good_ind])\n        goodpld_model = np.array([i for j, i in enumerate(pld_model) if j in good_ind])\n        goodpld_error = np.array([i for j, i in enumerate(pld_error) if j in good_ind])\n        goodcentx= np.array([i for j, i in enumerate(centx) if j in 
good_ind])\n goodcenty= np.array([i for j, i in enumerate(centy) if j in good_ind])\n \n \n \n time = np.array([i for j, i in enumerate(time) if j not in bad_ind])\n sap_flux = np.array([i for j, i in enumerate(sap_flux) if j not in bad_ind])\n sap_error = np.array([i for j, i in enumerate(sap_error) if j not in bad_ind])\n det_flux = np.array([i for j, i in enumerate(det_flux) if j not in bad_ind])\n det_error = np.array([i for j, i in enumerate(det_error) if j not in bad_ind])\n trend_lc = np.array([i for j, i in enumerate(trend_lc) if j not in bad_ind])\n pld_flux = np.array([i for j, i in enumerate(pld_flux) if j not in bad_ind])\n pld_error = np.array([i for j, i in enumerate(pld_error) if j not in bad_ind])\n pld_model = np.array([i for j, i in enumerate(pld_model) if j not in bad_ind]) \n centx = np.array([i for j, i in enumerate(centx) if j not in bad_ind]) \n centy = np.array([i for j, i in enumerate(centy) if j not in bad_ind]) \n ###\n ###\n ###\n import pandas as pd\n #saving flagged data points\n print('saving flagged outliers')\n bad_ind_DF = pd.DataFrame({\"Time\":badtime, \"SAP Flux\": badsap_flux, \"SAP Error\":baderror, \"Detrended Flux\":badflux_detrended, \"Detrended Error\":baddet_error,\"Fitted Trend\":badtrend_lc,\"PLD Flux\":badpld_detrended,\"PLD Model\":badpld_model,\"PLD Error\":badpld_error,\"Centroid X Positions\":badcentx,\"Centroid Y Positions\":badcenty})\n good_ind_DF = pd.DataFrame({\"Time\":goodtime, \"SAP Flux\": goodsap_flux, \"SAP Error\":gooderror, \"Detrended Flux\":goodflux_detrended, \"Detrended Error\":gooddet_error,\"Fitted Trend\":goodtrend_lc,\"PLD Flux\":goodpld_detrended,\"PLD Model\":goodpld_model,\"PLD Error\":goodpld_error,\"Centroid X Positions\":goodcentx,\"Centroid Y Positions\":goodcenty})\n bad_ind_DF.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_bad_outliers.txt\",index=False)\n good_ind_DF.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_good_outliers.txt\",index=False)\n ###\n ###\n ###\n if verbose==True:\n print('after outlier removal: ',' T', len(time),' Det',len(det_flux), ' trend',len(trend_lc), 'PLD model',len(pld_model),' PLD F',len(pld_flux),' SAP E', len(sap_error))\n ###\n if remove_outliers=='no':\n time = np.ascontiguousarray(time, dtype=np.float64)\n sap_flux = np.ascontiguousarray(sap_flux, dtype=np.float64)\n sap_error = np.ascontiguousarray(sap_error, dtype=np.float64)\n det_flux = np.ascontiguousarray(det_flux, dtype=np.float64)\n det_error= np.ascontiguousarray(det_error, dtype=np.float64)\n trend_lc = np.ascontiguousarray(trend_lc, dtype=np.float64) \n pld_flux = np.ascontiguousarray(pld_flux, dtype=np.float64) \n pld_error = np.ascontiguousarray(pld_error, dtype=np.float64)\n pld_model = np.ascontiguousarray(pld_model, dtype=np.float64)\n centx = np.ascontiguousarray(centx, dtype=np.float64)\n centy = np.ascontiguousarray(centy, dtype=np.float64) \n if verbose==True:\n print('skipping outlier removal:',' T', len(time),' Det',len(det_flux), ' trend',len(trend_lc), 'PLD model',len(pld_model),' PLD F',len(pld_flux),' SAP E', len(sap_error)) \n ###\n ###\n ### \n nanmask = np.where(np.isfinite(pld_flux)==True)[0]\n time = time[nanmask]\n sap_flux = sap_flux[nanmask]\n sap_error=sap_error[nanmask]\n det_flux = det_flux[nanmask]\n det_error=det_error[nanmask]\n trend_lc=trend_lc[nanmask] \n pld_flux = pld_flux[nanmask] \n pld_error=pld_error[nanmask]\n pld_model=pld_model[nanmask]\n centx=centx[nanmask]\n centy=centy[nanmask] \n \n \n #needs to be flexible to allow different combos of 
PLD, DET and SAP \n LC_df = pd.DataFrame({\"Time\":time, \"SAP Flux\": sap_flux, \"SAP Error\":sap_error, \"Detrended Flux\":det_flux, \"Detrended Error\":det_error,\"Fitted Trend\":trend_lc,\"PLD Flux\":pld_flux,\"PLD Error\":pld_error,\"PLD Model\":pld_model,\"Centroid X Positions\":centx,\"Centroid Y Positions\":centy})\n ###\n LC_df.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_final_LC.txt\",index=False)\n ###\n ###\n return LC_df, good_ind_DF, bad_ind_DF, preclipLC_df\n\n\n\n\n\ndef plot_it_all_up(ID,Sector,cutoutsize,cadence,Nsigma_low,Nsigma_high,hdu,median_image,pix_mask,bkg_mask, RAWLC_df, clippedRAWLC_df,LC_df, good_ind_DF, bad_ind_DF, preclipLC_df, magnitude_limit,dot_scale,path,downloadpath):\n import numpy as np\n #stuff for getting data from MAST\n import astropy\n from astroquery.mast import Catalogs\n from astroquery.mast import Tesscut\n from astropy.coordinates import SkyCoord\n from astroquery.gaia import Gaia\n from astropy.wcs import WCS\n from astropy.io import fits\n import astropy.units as u\n from astropy.coordinates import SkyCoord, Angle\n from astroquery.vizier import Vizier\n from matplotlib import pyplot as plt\n from transitleastsquares import catalog_info\n \n # changing cache directories\n Tesscut.cache_location=downloadpath\n Catalogs.cache_location=downloadpath\n Vizier.cache_location=downloadpath\n \n \n ### Plotting and Saving FFI and selected apertures\n ###\n try:\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:\n clock.sleep(5) #pause 5 seconds then try again\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist = Get_stellar_params(ID,downloadpath)\n ###\n wcs = WCS(hdu[2].header)\n \n if cadence=='short':\n x=hdu[1].header['1CRPX4']-1\n y=hdu[1].header['2CRPX4']-1\n if cadence=='long':\n x=hdu[1].header['1CRPX4']-1\n y=hdu[1].header['2CRPX4']-1\n CCD=hdu[0].header['CCD']\n Camera=hdu[0].header['Camera']\n reference_pixel=[x,y]\n centx,centy = centroid_quadratic(median_image, pix_mask,reference_pixel)\n ###\n rawtime=np.array(RAWLC_df['Time'].to_list())\n rawflux=np.array(RAWLC_df['SAP Flux'].to_list())\n rawflux_error=np.array(RAWLC_df['SAP Error'].to_list())\n ### \n clippedrawtime=np.array(clippedRAWLC_df['Time'].to_list())\n clippedrawflux=np.array(clippedRAWLC_df['SAP Flux'].to_list())\n clippedrawflux_error=np.array(clippedRAWLC_df['SAP Error'].to_list())\n ### \n ###\n # pre-outlier removed data (slightly better representation of steps)\n time=np.array(preclipLC_df['Time'].to_list())\n sap_flux=np.array(preclipLC_df['SAP Flux'].to_list())\n sap_error=np.array(preclipLC_df['SAP Error'].to_list())\n det_error=np.array(preclipLC_df['Detrended Error'].to_list())\n pld_error=np.array(preclipLC_df['PLD Error'].to_list())\n flux_detrended=np.array(preclipLC_df['Detrended Flux'].to_list())\n trend_lc=np.array(preclipLC_df['Fitted Trend'].to_list())\n pld_detrended=np.array(preclipLC_df['PLD Flux'].to_list())\n pld_model=np.array(preclipLC_df['PLD Model'].to_list())\n ###\n badtime=np.array(bad_ind_DF['Time'].to_list())\n badsapflux=np.array(bad_ind_DF['SAP Flux'].to_list()) \n baddetflux=np.array(bad_ind_DF['Detrended Flux'].to_list()) \n badpldflux=np.array(bad_ind_DF['PLD Flux'].to_list()) \n ###\n goodtime=np.array(good_ind_DF['Time'].to_list())\n goodsapflux=np.array(good_ind_DF['SAP 
Flux'].to_list())\n gooddetflux=np.array(good_ind_DF['Detrended Flux'].to_list())\n goodpldflux=np.array(good_ind_DF['PLD Flux'].to_list())\n ###\n #final data\n finaltime=np.array(LC_df['Time'].to_list())\n finalflux=np.array(LC_df['Detrended Flux'].to_list())\n finalerror=np.array(LC_df['Detrended Error'].to_list())\n #\n ### \n ###\n print('raw SAP data T ', len(rawtime), ' F ',len(rawflux),' E ', len(rawflux_error))\n ###\n #fontsize\n fs=16\n ###\n fig = plt.figure(figsize=(14,10))\n ax1 = fig.add_subplot(521,projection = wcs)\n ax2 = fig.add_subplot(522,projection = wcs)\n ax3 = fig.add_subplot(512)\n ax4 = fig.add_subplot(513)\n ax5 = fig.add_subplot(514)\n ax6 = fig.add_subplot(515)\n ###\n ###\n axes = [ax1,ax2]\n ###\n ###\n ###\n if cadence=='long':\n savefigpath = path+'FFI_PLD_plots/'\n savelcpath = path+'FFI_PLD_LCs/'\n if cadence=='short':\n savefigpath = path+'TPF_PLD_plots/'\n savelcpath = path+'TPF_PLD_LCs/' \n ###\n axes=[ax1,ax2]\n plot_cutouts(ID,Sector,cadence,hdu,pix_mask,bkg_mask,\\\n reference_pixel,fig,axes,savelcpath,downloadpath)\n# handles, labels = ax2.get_legend_handles_labels()\n# by_label = dict(zip(labels, handles))\n# if len(by_label.values())>4:\n# ncols=2\n# else:\n# ncols=1\n# ax2.legend(by_label.values(), by_label.keys(),bbox_to_anchor=(1.4,1.1),\\\n# loc='upper right',fontsize=8,ncol=ncols,markerscale=1,\\\n# labelspacing=1.,title=\"GAIA Mag\")\n fig.tight_layout(pad=1)\n \n ###\n ###\n ###\n fig.suptitle(r\"TIC \"+str(ID)+\" Sector \"+str(Sector)+\" Camera \"+str(Camera)+\" CCD \"+str(CCD)+\\\n \" $R_{star}$: \"+str(np.round(R_star,3))+\" $R_{\\odot}$ $M_{star}$: \"+str(np.round(M_star,3))+\\\n \" $M_{\\odot}$\"+\"\\n Teff \"+str(Teff)+\" ; TESSmag \"+str(Tmag)+'; Vmag '+str(Vmag),\\\n fontsize = fs,x=0.5,y=1.08)\n ###\n ###\n ###\n ###\n ax3.set_title(\"Simple Aperture Photometry (SAP) Light Curve and PLD Noise Model\",fontsize = fs)\n ###\n ### \n cdpp_sap = CDPP(clippedrawtime,clippedrawflux,clippedrawflux_error,'median','ppm',binsize=(1.0/24.0))\n cdpp_det = CDPP(time,flux_detrended,det_error,'median','ppm',binsize=(1.0/24.0)) \n cdpp_pld = CDPP(time,pld_detrended,pld_error,'median','ppm',binsize=(1.0/24.0)) \n cdpp_final = CDPP(finaltime,finalflux,finalerror,'median','ppm',binsize=(1.0/24.0)) \n ###\n std_sap = 5*np.nanstd(clippedrawflux)\n ###\n ###\n ###\n ax3.set_ylim(np.nanmedian(clippedrawflux)-std_sap,np.nanmedian(clippedrawflux)+std_sap)\n ax3.plot(rawtime,rawflux,marker='.',color='grey',linestyle='none',\\\n label='jitter/glare/Mdumps',zorder=-100)\n ax3.plot(clippedrawtime,clippedrawflux,marker='.',color='black',\\\n linestyle='none',\\\n label=r' SAP Flux'+'\\n CDPP: '+str(np.round(cdpp_sap,2))+' $\\sigma _{ppm}$ ''hr$^{-1/2}$',zorder=-100)\n# ax3.plot(time,trend_lc,color='orange',label='Fitted trend')\n ax3.plot(time,pld_model,'y-',label='PLD model')\n ###\n ###\n# ax4.set_title(\"Detrended Light Curve with Pixel Level Decorrelation (PLD) Model\",fontsize = fs) \n ax4.set_title(\"PLD Corrected Light Curve with Fitted Trend Line\",fontsize = fs) \n# ax4.plot(time,flux_detrended,\"k.\",label=r' Detrended Flux'+'\\n CDPP: '+str(np.round(cdpp_det,2))+' ppm',zorder=-100)\n ax4.plot(time,pld_detrended, \"k.\",\\\n label=r' PLD Flux'+'\\n CDPP: '+str(np.round(cdpp_pld,2))+' $\\sigma _{ppm}$ ''hr$^{-1/2}$',zorder=-100) \n# ax4.plot(time,pld_model,'y-',label='PLD model')\n ax4.plot(time,trend_lc,color='orange',label='Fitted trend')\n ###\n ###\n# ax5.set_title(\"PLD Detrended Light Curve\",fontsize = fs)\n# ax5.plot(time,pld_detrended, 
\"k.\",label=r' PLD Flux'+'\\n CDPP: '+str(np.round(cdpp_pld,2))+' ppm',zorder=-100)\n ax5.set_title(\"PLD Corrected and Smoothed Light Curve\",fontsize = fs) \n ax5.plot(time,flux_detrended,\"k.\",\\\n label=r' Detrended Flux'+'\\n CDPP: '+str(np.round(cdpp_det,2))+' $\\sigma _{ppm}$ ''hr$^{-1/2}$',zorder=-100) \n ###\n ###\n ax6.set_title(\"PLD Corrected, Smoothed and Outlier Removed Light Curve\",fontsize = fs)\n ax6.plot(finaltime,finalflux, \"k.\",label=r' Final Flux'+'\\n CDPP: '+str(np.round(cdpp_final,2))+' $\\sigma _{ppm}$ ''hr$^{-1/2}$',zorder=-100)\n ###\n# ax3.plot(badtime,badsapflux,'ro',zorder=2,label='bad outliers')\n ax5.plot(badtime,baddetflux,'r.',zorder=2,label='bad outliers')\n ax5.plot(goodtime,gooddetflux,'g.',zorder=2,label='good outliers')\n ###\n ###\n ###\n ###\n ax3.set_xlabel(\"Time (BTJD)\",fontsize=fs)\n# ax3.set_ylabel(\"Normalized Relative flux \",fontsize=fs)\n ax3.legend(loc='best',ncol=2,fontsize=fs-2,fancybox=True,framealpha=1,markerscale=2)\n ax3.tick_params(axis='both', which='major', labelsize=fs)\n ax3.tick_params(axis='both', which='minor', labelsize=fs)\n ###\n ###\n ax4.set_xlabel(\"Time (BTJD)\",fontsize=fs)\n ax4.set_ylabel(\"Normalized Relative flux \",fontsize=fs)\n ax4.legend(loc='best',ncol=2,fontsize=fs-2,fancybox=True,framealpha=1,markerscale=2)\n ax4.tick_params(axis='both', which='major', labelsize=fs)\n ax4.tick_params(axis='both', which='minor', labelsize=fs)\n ###\n ###\n ax5.set_xlabel(\"Time (BTJD)\",fontsize=fs)\n# ax5.set_ylabel(\"Normalized Relative flux \",fontsize=fs)\n ax5.legend(loc='best',ncol=2,fontsize=fs-2,fancybox=True,framealpha=1,markerscale=2)\n ax5.tick_params(axis='both', which='major', labelsize=fs)\n ax5.tick_params(axis='both', which='minor', labelsize=fs) \n ###\n ###\n ax6.set_xlabel(\"Time (BTJD)\",fontsize=fs)\n# ax5.set_ylabel(\"Normalized Relative flux \",fontsize=fs)\n ax6.legend(loc='best',ncol=2,fontsize=fs-2,fancybox=True,framealpha=1,markerscale=2)\n ax6.tick_params(axis='both', which='major', labelsize=fs)\n ax6.tick_params(axis='both', which='minor', labelsize=fs)\n ###\n ###\n \n #ax3.legend(loc='lower right', bbox_to_anchor=(1.175,0.0))\n #ax4.legend(loc='lower right', bbox_to_anchor=(1.175,0.0))\n #ax5.legend(loc='lower right', bbox_to_anchor=(1.175,0.0)) \n #ax6.legend(loc='lower right', bbox_to_anchor=(1.175,0.0))\n \n #\n #ax3.legend(loc='upper right', bbox_to_anchor=(1.175,1.0), bbox_transform=ax3.transAxes)\n #ax4.legend(loc='upper right', bbox_to_anchor=(1.175,1.0), bbox_transform=ax4.transAxes)\n #ax5.legend(loc='upper right', bbox_to_anchor=(1.175,1.0), bbox_transform=ax5.transAxes)\n #ax6.legend(loc='upper right', bbox_to_anchor=(1.175,1.0), bbox_transform=ax6.transAxes)\n \n ax3.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)\n ax4.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)\n ax5.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)\n ax6.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)\n ###\n ### \n mdumps,t_0,t_1 = momentumdump_check(Sector)\n t_0=np.nanmin(time)\n Num_mdumps = int(np.round((np.nanmax(time) - np.nanmin(time))/mdumps,2))+1\n print('') \n ###\n ### \n \n if Sector==31:\n t_0end = 2157.45371\n t_1end = 2169.94398\n time_mdump1 = t_0+ (t_0end - t_0)/2\n time_mdump2 = t_1+ (t_1end - t_1)/2\n #\n ax3.axvline(x=time_mdump1,zorder=-2)\n ax4.axvline(x=time_mdump1,zorder=-2)\n ax5.axvline(x=time_mdump1,zorder=-2)\n ax6.axvline(x=time_mdump1,zorder=-2)\n #\n ax3.axvline(x=time_mdump2,zorder=-2)\n ax4.axvline(x=time_mdump2,zorder=-2)\n 
ax5.axvline(x=time_mdump2,zorder=-2)\n        ax6.axvline(x=time_mdump2,zorder=-2) \n    else:\n        for N in range(Num_mdumps):\n            time_mdump1 = t_0+(N)*mdumps\n            time_mdump2 = t_1+(N+0.5)*mdumps \n            if time_mdump1 < t_1:\n                ax3.axvline(x=time_mdump1,zorder=-2)\n                ax4.axvline(x=time_mdump1,zorder=-2)\n                ax5.axvline(x=time_mdump1,zorder=-2)\n                ax6.axvline(x=time_mdump1,zorder=-2)\n            if time_mdump2 < np.nanmax(time):\n                ax3.axvline(x=time_mdump2,zorder=-2)\n                ax4.axvline(x=time_mdump2,zorder=-2)\n                ax5.axvline(x=time_mdump2,zorder=-2) \n                ax6.axvline(x=time_mdump2,zorder=-2) \n    ###\n    ###\n    fig.tight_layout(pad=0.1,h_pad=0,w_pad=10)\n    fig.savefig(savefigpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_LC_summary.png\",bbox_inches='tight')\n    plt.close()\n\n\n\n\n###### putting it all together ##########\n###### putting it all together ##########\n###### putting it all together ##########\n\n\n#work on making order of operations flexible\ndef full_pipeline(ID,cutoutsize,Sector,minimum_photon_counts,threshold,pld_order,n_pca_terms, Nsigma_low, Nsigma_high, remove_outliers, before_after_in_minutes, path, cadence, verbose, keep_FITS=True, keep_imagedata=True, window_size_in_days=None,use_SPOC_aperture='no'): \n    from transitleastsquares import catalog_info \n    import sys\n    ###\n    #first, check if target has known stellar radius and/or mass:\n    try:\n        qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n    except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:\n        clock.sleep(5) #pause 5 seconds then try again\n        qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n    if np.isfinite(R_star)==False or np.isfinite(M_star)==False:\n        print('TIC '+str(ID)+' has no known Stellar Mass or Radius in TIC')\n        return\n    else:\n        ###\n        print('TIC '+str(ID)+' Sector '+str(Sector))\n        #Step 0: Creating directories to save figures and data\n        if verbose==True:\n            print('Step 0: Making Directories')\n            print(' ')\n        path, savefigpath, savelcpath,downloadpath = Make_dirs(path,Sector,cadence)\n        ###\n        #Step 1: Obtaining HDU for FFI/TPF\n        if verbose==True:\n            print('Step 1: Obtaining HDU from FFI/TPF')\n            print(' ')\n        try:\n            hdu,CCD,Camera,quality_mask,reference_pixel = gethdu(ID,Sector,cutoutsize,cadence,minimum_photon_counts,verbose,downloadpath)\n            ###\n            print(' ')\n            if hdu is None:\n                sys.exit('No Image data for TIC '+str(ID)+' in Sector '+ str(Sector)+'!!!') \n        except AttributeError as AE:\n            print(AE)\n            sys.exit('No Image data for TIC '+str(ID)+' in Sector '+ str(Sector)+'!!!') \n        ###\n        if verbose==True:\n            print('Step 2: Performing Background Subtraction and Simple Aperture Photometry')\n            print(' ')\n        try:\n            bkg_mask, pix_mask ,flux, median_image, SAP_LC, flux_contamination_ratio = SAP(ID,Sector,cutoutsize,hdu,quality_mask,threshold,cadence,reference_pixel,verbose,savelcpath,use_SPOC_aperture=use_SPOC_aperture) #pass the caller's choice through instead of hard-coding 'no'\n            ###\n        except TypeError as TE:\n            print(TE)\n            print('Unable to create aperture mask. 
Skipping this target...')\n return \n ###\n ###\n ###\n if len(SAP_LC['SAP Error'])==0:\n print(' ')\n print('Uneven array lengths, FFI likely on edge of detector/partially shown')\n return\n ###\n ###\n if verbose==True:\n print('Step 3: Removing Momentum dumps and regions of high jitter / Earth-Moon glare')\n print(' ')\n mask_mdump, mdumps,t_0,t_1, flux, RAWLC_df, clippedRAWLC_df = Applying_Mdump_removal(ID,Sector,Camera,CCD,before_after_in_minutes,SAP_LC,flux,savelcpath,verbose)\n ###\n ###\n ### saving pixel and background masks and image fluxes\n saveNDarr(pix_mask,savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_pix_mask\")\n saveNDarr(bkg_mask,savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_bkg_mask\")\n saveNDarr(flux,savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_image_fluxes\") \n ###\n ### calculating centroid positions throughout images and resaving to file\n cxs,cys = check_centroids(ID,Sector,cutoutsize,cadence,reference_pixel,savelcpath)\n time = np.array(clippedRAWLC_df['Time'])\n sap_flux=np.array(clippedRAWLC_df['SAP Flux'])\n sap_error=np.array(clippedRAWLC_df['SAP Error'])\n bkg_flux=np.array(clippedRAWLC_df['Background Flux'])\n clippedRAWLC_df = pd.DataFrame({\"Time\":time, \"SAP Flux\": sap_flux, \"SAP Error\":sap_error,\"Background Flux\":bkg_flux, \"Centroid X Positions\":cxs,\"Centroid Y Positions\":cys})\n clippedRAWLC_df.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_RAW_LC_systematics_removed.txt\",index=False)\n ###\n if verbose==True:\n print('Step 4: Performing Pixel Level Decorrelation modeling')\n print(' ')\n ###\n ## work on making this flexible to take either PLD or SAP\n input_LC = pd.DataFrame({'Time':np.array(clippedRAWLC_df['Time']),\\\n 'Flux':np.array(clippedRAWLC_df['SAP Flux']),\\\n 'Error':np.array(clippedRAWLC_df['SAP Error']),\\\n \"Centroid X Positions\":np.array(clippedRAWLC_df[\"Centroid X Positions\"]),\\\n \"Centroid Y Positions\":np.array(clippedRAWLC_df[\"Centroid Y Positions\"])})\n\n PLD_LC = PLD_model(ID,Sector,flux,pix_mask,input_LC,savelcpath,pld_order=pld_order, n_pca_terms=n_pca_terms)\n ###\n ###\n ###\n if verbose==True:\n print('Step 5: Applying smoothing filter')\n print(' ') \n print('len check for step 5:')\n print('PLD T',len(np.array(PLD_LC['Time'])),'PLD F',len(np.array(PLD_LC['PLD Flux'])),'PLD E',len(np.array(PLD_LC['PLD Error'])))\n ## work on making this flexible to take either DET or SAP\n input_LC2 = pd.DataFrame({'Time':np.array(PLD_LC['Time']),\\\n 'Flux':np.array(PLD_LC['PLD Flux']),\\\n 'Error':np.array(PLD_LC['PLD Error']),\\\n 'Model':np.array(PLD_LC['PLD Model']),\\\n 'SAP Flux':np.array(input_LC['Flux']),\\\n 'SAP Error':np.array(input_LC['Error']),\\\n \"Centroid X Positions\":np.array(PLD_LC[\"Centroid X Positions\"]),\\\n \"Centroid Y Positions\":np.array(PLD_LC[\"Centroid Y Positions\"])})\n Det_LC, nanmask = BWMC_auto(ID,Sector,input_LC2,savelcpath) \n ###\n ### ensure PLD outputs and Detrended outputs have same length using nanmask output\n print('2nd len check for step 5: ')\n # print('T: ',len(Det_LC['Time']),'SAP F: ',len(PLD_LC['SAP Flux']), 'SAP E: ',len(PLD_LC['SAP Error']) ,\\\n # ' PLD F: ', len(PLD_LC['PLD Flux']),' PLD E: ', len(PLD_LC['PLD Error']),\\\n # ' Det F: ',len(Det_LC['Detrended Flux']), 'Det E: ',len(Det_LC['Detrended Error'])) \n # PLD_LC = pd.DataFrame({'Time':np.array(Det_LC['Time']),'SAP Flux':(PLD_LC['SAP Flux'])[nanmask],\\\n # 'SAP Error':(PLD_LC['SAP Error'])[nanmask],'PLD Flux':np.array(PLD_LC['PLD Flux'])[nanmask],\\\n # 'PLD 
Error':np.array(PLD_LC['PLD Error'])[nanmask],\\\n # 'PLD Model':np.array(PLD_LC['PLD Model'])[nanmask],\\\n # 'Centroid X Positions':np.array(PLD_LC['Centroid X Positions']),\\\n # 'Centroid Y Positions':np.array(PLD_LC['Centroid Y Positions'])})\n print('T: ',len(Det_LC['Time']),'SAP F: ',len(Det_LC['SAP Flux']), 'SAP E: ',len(Det_LC['SAP Error']),' PLD F: ', len(Det_LC['PLD Flux']),' PLD E: ', len(Det_LC['PLD Error']),' PLD M: ', len(Det_LC['PLD Model']),' Det F: ',len(Det_LC['Detrended Flux']), 'Det E: ',len(Det_LC['Detrended Error']),' Det M: ',len(Det_LC['Fitted Trend'])) \n ###\n ###\n ###\n if verbose==True:\n print('Step 6: Applying Outlier Removal (if set to \"yes\")')\n print(' ')\n ###\n #this needs to be the MOST flexible part to deal with combos of PLD, DET and SAP\n print('len check for step 6:')\n print('T: ',len(Det_LC['Time']),'SAP F: ',len(Det_LC['SAP Flux']), 'SAP E: ',len(Det_LC['SAP Error']),' PLD F: ', len(Det_LC['PLD Flux']),' PLD E: ', len(Det_LC['PLD Error']),' PLD M: ', len(Det_LC['PLD Model']),' Det F: ',len(Det_LC['Detrended Flux']), 'Det E: ',len(Det_LC['Detrended Error']),' Det M: ',len(Det_LC['Fitted Trend'])) \n input_LC3 = pd.DataFrame({'Time':np.array(Det_LC['Time']),\\\n 'SAP Flux':np.array(Det_LC['SAP Flux']),\\\n 'SAP Error':np.array(Det_LC['SAP Error']),\\\n 'Detrended Flux':np.array(Det_LC['Detrended Flux']),\\\n 'Detrended Error':np.array(Det_LC['Detrended Error']),\\\n 'Fitted Trend':np.array(Det_LC['Fitted Trend']),\\\n 'PLD Flux':np.array(Det_LC['PLD Flux']),\\\n 'PLD Error':np.array(Det_LC['PLD Error']),\\\n 'PLD Model':np.array(Det_LC['PLD Model']),\\\n \"Centroid X Positions\":np.array(Det_LC[\"Centroid X Positions\"]),\\\n \"Centroid Y Positions\":np.array(Det_LC[\"Centroid Y Positions\"])})\n ###\n ###\n LC_df, good_ind_DF, bad_ind_DF, preclipLC_df = outlier_removal(ID,Sector,input_LC3, remove_outliers, Nsigma_low,Nsigma_high,savelcpath,verbose,window_size_in_days=window_size_in_days) \n ###\n ###\n ###\n ###\n ###\n ###\n if verbose==True:\n print('Step 7: Plotting and Saving FFI and selected apertures')\n print(' ')\n plot_it_all_up(ID,Sector,cutoutsize,cadence,Nsigma_low,Nsigma_high,\\\n hdu,median_image,pix_mask,bkg_mask, RAWLC_df, \\\n clippedRAWLC_df, LC_df, good_ind_DF, bad_ind_DF, preclipLC_df, \\\n magnitude_limit=18,dot_scale=20,path=path,downloadpath=downloadpath)\n ###\n ###\n if keep_FITS==False:\n # deleting FITS files (no longer need them for light curve processing\n # can always download again)\n os.system(\"rm -r \" + downloadpath) #delete cache path\n ###\n ###\n print('FINAL LENGTHS :', ' T', len(LC_df['Time']),' Det F',len(LC_df['Detrended Flux']), ' trend',len(LC_df['Fitted Trend']), 'PLD model',len(LC_df['PLD Model']),' PLD F',len(LC_df['PLD Flux']),' SAP E', len(LC_df['SAP Error']))\n\n###### putting it all together ##########\n###### putting it all together ##########\n###### putting it all together ##########\n\n\n\n\n\n\n\n\n##########################################\n##########################################\n##########################################\n####### TRANSIT SEARCH FUNCTIONS #########\n##########################################\n##########################################\n##########################################\n\n\ndef phasefold(T0,time,period,flux):\n phase=(time- T0 + 0.5*period) % period - 0.5*period \n ind=np.argsort(phase, axis=0)\n return phase[ind],flux[ind]\n\n\ndef MoreThan_N_Transits(ID,time,flux,error,T_C_array,T_C,planet_model,planet_model_time,\\\n 
Period,xwidth,spacing,window_size,T_C_x_position,T_C_y_position,\\\n fontsize,markersize,axis,XLIM,TLS_Dur):\n ###\n #new\n spacing = 1.0-np.nanmin(planet_model) #sort of like transit depth\n spacing=0.5*spacing\n# print('spacing: ',spacing)\n if spacing<0.01:\n spacing=0.005\n xshift=1.1\n yshift=0.1\n XLIM=0-0.5*TLS_Dur#1.75*TLS_Dur\n ###\n ###\n for x in range(len(T_C_array)):\n Even=[]\n Evenmodel=[]\n Odd=[]\n Oddmodel=[]\n if x %2 ==0: #even\n ###\n cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]\n cut_t = time[cut]\n cut_f = flux[cut]\n cut_fe = error[cut]\n cut2 = np.where( ((T_C_array[x]-window_size) < planet_model_time) & ((T_C_array[x]+window_size) > planet_model_time) )[0]\n cut_model_f = planet_model[cut2]\n cut_model_t = planet_model_time[cut2]\n ###\n if len(cut_f)<1:\n# print('cut window too small, switching to 1.5 hr window')\n window_size=1.5\n cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]\n cut_t = time[cut]\n cut_f = flux[cut]\n cut_fe = error[cut]\n cut2 = np.where( ((T_C_array[x]-window_size) < planet_model_time) & ((T_C_array[x]+window_size) > planet_model_time) )[0]\n cut_model_f = planet_model[cut2]\n cut_model_t = planet_model_time[cut2]\n ###\n phase,_ = phasefold(T_C_array[x],cut_t,Period,cut_f)\n Even=np.append(Even,phase)\n pf_model,_ = phasefold(T_C_array[x],cut_model_t,Period,cut_model_f)\n Evenmodel=np.append(Evenmodel,pf_model)\n xxxx=1\n axis.plot(24*Even,cut_f,color='dimgrey',marker='o',linestyle='none',markersize=markersize+1, rasterized=True)\n axis.plot(24*Evenmodel,cut_model_f,'r.-',markersize=markersize-1, rasterized=True)\n ###\n else: #odd\n cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]\n cut_t = time[cut]\n cut_f = flux[cut]\n cut_fe = error[cut]\n cut2 = np.where( ((T_C_array[x]-window_size) < planet_model_time) & ((T_C_array[x]+window_size) > planet_model_time) )[0]\n cut_model_f = planet_model[cut2]\n cut_model_t = planet_model_time[cut2]\n ###\n if len(cut_f)<1:\n# print('cut window too small, switching to 1.5 hr window')\n window_size=1.5\n cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]\n cut_t = time[cut]\n cut_f = flux[cut]\n cut_fe = error[cut]\n cut2 = np.where( ((T_C_array[x]-window_size) < planet_model_time) & ((T_C_array[x]+window_size) > planet_model_time) )[0]\n cut_model_f = planet_model[cut2]\n cut_model_t = planet_model_time[cut2]\n ###\n phase,_ = phasefold(T_C_array[x],cut_t,Period,cut_f)\n Odd=np.append(Odd,phase)\n pf_model,_ = phasefold(T_C_array[x],cut_model_t,Period,cut_model_f)\n Oddmodel=np.append(Oddmodel,pf_model)\n xxxx=xxxx+2\n axis.plot(24*Odd,cut_f+spacing*(xxxx),color='lightblue',marker='o',linestyle='none',markersize=markersize+1, rasterized=True)\n axis.plot(24*Oddmodel,cut_model_f+spacing*(xxxx),'r.-',markersize=markersize-1, rasterized=True)\n ###\n axis.annotate(\"Odd\", xy=( XLIM, np.nanmean(cut_model_f+spacing*((xxxx+0.5)))+T_C_y_position ), va='top',xycoords='data', fontsize=fontsize+4,weight=\"bold\")\n axis.annotate(\"Even\", xy=( XLIM, np.nanmean(cut_model_f)-spacing*(1)+T_C_y_position ), va='top',xycoords='data', fontsize=fontsize+4,weight=\"bold\")\n ###\n ymax = np.nanmax(cut_f+spacing*4)#(xxxx+1)*1.5) \n ymin = np.nanmin(cut_f-spacing*2) \n# print(ymin,ymax)\n return ymin,ymax\n\n\n\ndef LessThan_N_Transits(ID,time,flux,error, T_C_array,T_C,planet_model,planet_model_time,\\\n 
Period,xwidth,spacing,window_size,T_C_x_position,T_C_y_position,\\\n fontsize,markersize,axis,XLIM,TLS_Dur):\n XLIM=0-TLS_Dur#float(10.0*TLS_Dur)#-0.5*TLS_Dur\n #print(XLIM)\n #alter text position and size:\n T_C_x_position=T_C_x_position+1.25\n #print(T_C_x_position)\n fontsize=fontsize-4\n shift=0.75\n ###\n spacing = 1.0-np.nanmin(planet_model) #sort of like transit depth\n# spacing=2.5*spacing\n spacing=0.5*spacing\n if spacing<0.01:\n spacing=0.005\n ###\n for x in range(len(T_C_array)):\n cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]\n cut_t = time[cut]\n cut_f = flux[cut]\n cut_fe = error[cut]\n cut2 = np.where( ((T_C_array[x]-window_size) < planet_model_time) & ((T_C_array[x]+window_size) > planet_model_time) )[0]\n cut_model_f = planet_model[cut2]\n cut_model_t = planet_model_time[cut2]\n if len(cut_f)<1:\n print('cut window too small, switching to 1.5 hr window')\n window_size=1.5\n cut = np.where(((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time))[0]\n cut_t = time[cut]\n cut_f = flux[cut]\n cut_fe = error[cut]\n cut2 = np.where( ((T_C_array[x]-window_size) < planet_model_time) & ((T_C_array[x]+window_size) > planet_model_time) )[0]\n cut_model_f = planet_model[cut2]\n cut_model_t = planet_model_time[cut2]\n ###\n phase,cut_f = phasefold(T_C_array[x],cut_t,Period,cut_f)\n pf_model,cut_model_f = phasefold(T_C_array[x],cut_model_t,Period,cut_model_f)\n if x % 2 == 0: #alternate colors\n xxxx = x+1\n axis.plot(24*phase,cut_f+spacing*(xxxx),color='dimgrey',marker='o',linestyle='-',markersize=markersize+1, rasterized=True)\n axis.plot(24*pf_model,cut_model_f+spacing*(xxxx),'r.-',markersize=markersize-1, rasterized=True) \n axis.annotate(str(np.round(T_C_array[x],3))+\" BTJD\", xy=( XLIM, np.nanmean(cut_model_f+spacing*(xxxx+shift)-T_C_y_position) ), va='top',xycoords='data', fontsize=fontsize,weight=\"bold\") \n else:\n xxxx = x+1\n axis.plot(24*phase,cut_f+spacing*(xxxx),color='lightblue',marker='o',linestyle='-',markersize=markersize+1,rasterized=True)\n axis.plot(24*pf_model,cut_model_f+spacing*(xxxx),'r.-',markersize=markersize-1, rasterized=True) \n axis.annotate(str(np.round(T_C_array[x],3))+\" BTJD\", xy=( XLIM, np.nanmean(cut_model_f+spacing*(xxxx+shift)-T_C_y_position) ), va='top',xytext=(XLIM, np.nanmean(cut_model_f+spacing*(xxxx+shift)-T_C_y_position)),xycoords='data', fontsize=fontsize,weight=\"bold\")\n ymax = np.nanmax(cut_model_f+spacing*(len(T_C_array)+1)) \n ymin = np.nanmin(cut_model_f+spacing*(len(T_C_array)-5)) \n return ymin,ymax\n\n\ndef phasematch_and_seperate_plot_TLS(ID,time,flux,error, T_C_array,T_C,planet_model,planet_model_time,\n Period,xwidth,spacing,window_size,\n T_C_x_position,T_C_y_position,fontsize,markersize,axis,TLS_Dur):\n ###\n from matplotlib.offsetbox import AnchoredText\n N=5 #number of transits\n if len(T_C_array) > N:\n ymin,ymax = MoreThan_N_Transits(ID,time,flux,error, T_C_array,T_C,planet_model,planet_model_time,\n Period,xwidth,spacing,window_size,\n T_C_x_position,T_C_y_position,fontsize,markersize,axis,0,TLS_Dur)\n if len(T_C_array) <= N:\n ymin,ymax = LessThan_N_Transits(ID,time,flux,error, T_C_array,T_C,planet_model,planet_model_time,\n Period,xwidth,spacing,window_size,\n T_C_x_position,T_C_y_position,fontsize,markersize,axis,0,TLS_Dur)\n return ymin,ymax\n\ndef Vet_with_EDIVetter(ID, Sector, TLS_OUTPUT, qld, SDE_threshold, N_transits):\n import EDIunplugged as EDI\n import pandas as pd\n params=EDI.parameters(TLS_OUTPUT,limbDark=[qld[0], qld[1]], impact=0, 
snrThreshold=SDE_threshold, minTransit=N_transits)\n    params=EDI.Go(params,telescope='TESS')\n    \n    EDI_results = pd.DataFrame({'ID':ID,'Sector':Sector,'Flux Contamination':params.fluxContaminationFP, 'Too Many Transits Masked':params.TransMaskFP, 'Odd/Even Transit Variation':params.even_odd_transit_misfit,'Signal is not Unique':params.uniquenessFP,'Secondary Eclipse Found':params.SeFP,'Low Transit Phase Coverage':params.phaseCoverFP, 'Transit Duration Too Long':params.tdurFP, 'Signal is a False Positive':params.FalsePositive},index=[0])\n    return EDI_results\n\ndef TLS_func(ID,Sector,cadence,time,flux,error,N_transits,minP,oversampling_factor,duration_grid_step,path,for_injections=False):\n    #for reporting the TLS model's planet radius\n    R_earth = 6.378*10.0**8.0 #cm\n    R_sun = 6.955*10.0**10.0 #cm\n    ###\n    # calculate CDPP (in ppm per sqrt hour) to save later\n    cdpp = CDPP(time,flux,error,'median','ppm',binsize=(1.0/24.0))\n    time_span = np.nanmax(time) - np.nanmin(time)\n    #\n    from transitleastsquares import catalog_info\n    try:\n        qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n    except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:\n        clock.sleep(5) #pause 5 seconds then try again\n        qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n    from transitleastsquares import transitleastsquares\n    ###\n    maxP = (max(time)-min(time))/N_transits #longest period that still fits N_transits transits in the light curve\n    if cadence=='long':\n        T0_fit_margin = 0 #samples every data point\n    else:\n        T0_fit_margin = 0.001\n    ###\n    if np.isfinite(R_star)==True and np.isfinite(M_star)==True:\n        ###\n        tls = transitleastsquares(time,flux,error)\n        if np.isnan(R_star_min)==True:\n            #no stellar radius uncertainty in the TIC: run TLS with its default stellar priors\n            print('R_star_min = NaN')\n            print(' ')\n            ###\n            tls_power = tls.power(period_min=minP,period_max=maxP,n_transits_min=N_transits,\\\n                                  oversampling_factor=oversampling_factor,\\\n                                  duration_grid_step=duration_grid_step,\\\n                                  T0_fit_margin=T0_fit_margin,show_progress_bar=False)\n        else:\n            tls_power = tls.power(R_star_min=R_star-R_star_min, R_star_max=R_star+R_star_max,R_star=R_star,\\\n                                  M_star_min=M_star-M_star_min, M_star_max=M_star+M_star_max,M_star=M_star,\\\n                                  u=qld,period_min=minP,period_max=maxP,\\\n                                  n_transits_min=N_transits,\\\n                                  oversampling_factor=oversampling_factor,\\\n                                  duration_grid_step=duration_grid_step,\\\n                                  T0_fit_margin=T0_fit_margin,show_progress_bar=False)\n        ###\n        tls_power_periods = tls_power.periods\n        ###\n        #TLS results\n        TLS_periods=tls_power.period\n        TLS_periods_uncertainty=tls_power.period_uncertainty\n        TLS_odd_even = tls_power.odd_even_mismatch\n        TLS_FAP = tls_power.FAP\n        TLS_t0s=tls_power.T0\n        TLS_depths=tls_power.depth\n        TLS_Power=tls_power.power\n        TLS_sde=tls_power.SDE #top peak\n        TLS_Dur = tls_power.duration\n        TLS_TCs = tls_power.transit_times\n        TLS_SR = tls_power.SR\n        TLS_SDE = (TLS_Power-np.nanmedian(TLS_Power))/np.nanstd(TLS_Power)\n        \n        # if every TLS_SDE value is NaN, the search failed to converge (usually a noisy light curve)\n        if len(np.where(np.isnan(TLS_SDE)==True)[0])==len(TLS_SDE):\n            print('TLS unable to converge to solution. 
Check light curve, likely high CDPP.') \n return None, None, None , None\n ### Vet with EDI-Vetter\n EDI_results=Vet_with_EDIVetter(ID, Sector, tls_power, qld, SDE_threshold=6, N_transits=N_transits)\n \n TLS_depths_arr=tls_power.transit_depths #array of mean depths\n TLS_depths_arr_uncertainty=tls_power.transit_depths_uncertainties\n #making assumption that median of array of mean depth errors is representative of depth error\n TLS_depths_err = np.nanmedian(TLS_depths_arr)/len(TLS_depths_arr) #new,float\n \n TLS_rp_rs = tls_power.rp_rs #new, float\n TLS_snr = tls_power.snr #new, float\n TLS_transit_count= tls_power.transit_count #new, int\n TLS_distinct_transit_count = tls_power.distinct_transit_count #new, int \n TLS_per_transit_count= tls_power.per_transit_count #new, array\n TLS_snr_per_transit = tls_power.snr_per_transit #new, array\n TLS_snr_pink_per_transit = tls_power.snr_pink_per_transit #new. array\n ###\n #calculating TLS estimated planet radius and error\n R_p = np.sqrt(1.0-TLS_depths)*R_star*R_sun/R_earth\n R_star_err = R_star_min\n R_p_err = R_p * np.sqrt((R_star_err/R_star)**2 + (TLS_depths_err/TLS_depths)**2)\n ###\n #TLS models\n TLS_model_time = tls_power.model_lightcurve_time \n TLS_model = tls_power.model_lightcurve_model \n ###\n ### \n ### \n ###\n #saving results\n if for_injections==False:\n Path=path+'Sector_'+str(Sector)+'/'\n if for_injections==True:\n Path=path\n if cadence=='long':\n saveReportpath = Path+'FFI_TLS_Report/'\n if cadence=='short': \n saveReportpath = Path+'TPF_TLS_Report/'\n ###\n if os.path.exists(saveReportpath)==True:\n # print('folder exists, moving on...') #feel free to uncomment these out\n pass\n else:\n # print('making directory') \n os.makedirs(saveReportpath)\n ###\n import pandas as pd\n ###\n TLSdf = pd.DataFrame({\"TLS Periods\":tls_power_periods, \"TLS Power\": TLS_Power, \"TLS SR\":TLS_SR, \"TLS SDE\":TLS_SDE})\n TLSdf.to_csv(saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_TLS.txt\",index=False)\n ###\n TLSmodeldf = pd.DataFrame({\"Time\":TLS_model_time, \"Model\": TLS_model})\n TLSmodeldf.to_csv(saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_TLS_model.txt\",index=False)\n ###\n EDI_results.to_csv(saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_EDI_results.txt\",index=[0])\n ###\n ### \n# from NEMESIS_pipeline import Make_dirs, Get_stellar_params, gethdu, momentumdump_check\n savefigpath1 = Path+'FFI_PLD_plots/'\n savelcpath1 = Path+'FFI_PLD_LCs/'\n savefigpath2 = Path+'TPF_PLD_plots/'\n savelcpath2 = Path+'TPF_PLD_LCs/' \n downloadpath = Path+'cache/'\n\n if cadence=='long':\n savefigpath=savefigpath1\n savelcpath=savelcpath1\n downloadpath=downloadpath\n\n if cadence=='short': \n savefigpath=savefigpath2\n savelcpath=savelcpath2\n downloadpath=downloadpath\n ###\n# hdu,CCD,Camera,quality_mask,reference_pixel = gethdu(ID,Sector,cutoutsize=11,cadence=cadence,minimum_photon_counts=1,verbose=True,downloadpath=downloadpath)\n# if keep_FITS==False:\n# # deleting FITS files (no longer need them for light curve processing\n# # can always download again)\n# os.system(\"rm -r \" + downloadpath) #delete cache path\n ###\n Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist = Get_stellar_params(ID,downloadpath)\n mdumps,t_0,t_1 = momentumdump_check(Sector)\n #calculate flux contamination of nearby stars within 3 TESS pixels\n flux_contamination_ratio = calc_flux_contamination(ID)\n ### \n ### \n ### \n TLSbestfitdf = pd.DataFrame({\"TLS Period [d]\":TLS_periods, \"TLS Period 
Error\":TLS_periods_uncertainty,\"TLS TC [BTJD]\": TLS_t0s, \"TLS depths [ppt]\":(1-TLS_depths)*1000,\"TLS depth Error\":TLS_depths_err/1000,\"TLS SDE\":TLS_sde, \"TLS SNR\":TLS_snr,\"TLS FAP\":TLS_FAP,\"TLS Dur [hrs]\":TLS_Dur*24, \"TLS Transit Count\":TLS_transit_count,\"TLS Distinct Transit Count\":TLS_distinct_transit_count,\"TLS Odd Even Mismatch\":TLS_odd_even,\"RP_RS\":TLS_rp_rs,\"Planet Radius [RE]\":R_p,'Planet Radius Error':R_p_err,\"CDPP [ppm/sqrt hr]\": cdpp, 'Time Span [d]':time_span,'Stellar Radius [RS]':R_star, \"Stellar Mass [MS]\":M_star,\"Teff [K]\":Teff,\"Flux Contamination Ratio\":flux_contamination_ratio, \"Vmag\":Vmag,\"TESSmag\":Tmag,\"rmag\":rmag,\"imag\":imag,\"zmag\":zmag,\"Jmag\":Jmag,\"Hmag\":Hmag, \"Kmag\":Kmag,\"Momentum Dump Rate [d]\":mdumps,\"RA\":ra, \"DEC\":dec,\"logg\":logg,\"rho [g/ccm]\":rho,\"dist [pc]\":dist}, index=[0])\n ### \n TLSbestfitdf.to_csv(saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_TLS_bestfit.txt\",index=False)\n ###\n TLSTCsdf = pd.DataFrame({\"TLS TCs [BTJD]\":TLS_TCs,\"TLS Depths\":(1-TLS_depths_arr)*1000,\"TLS Depths Error\":TLS_depths_arr_uncertainty,\"SNR Per Transit\":TLS_snr_per_transit,\"SNR Pink Per Transit\":TLS_snr_pink_per_transit,})\n ### \n TLSTCsdf.to_csv(saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_TLS_TCs.txt\",index=False)\n ###\n return TLSdf, TLSmodeldf, TLSbestfitdf,TLSTCsdf\n else:\n print(\" \")\n print(\"NaNs in mass or radius\")\n print(\"Stellar Mass: \",M_star,\" Radius: \", R_star)\n print(\" \")\n pass\n\n#for determining BLS Period Error from HWHM of Peak in Power Spectrum\ndef peak(x, c):\n return np.exp(-np.power(x - c, 2) / 16.0)\ndef lin_interp(x, y, i, half):\n return x[i] + (x[i+1] - x[i]) * ((half - y[i]) / (y[i+1] - y[i]))\ndef HWHM(x, y):\n half = max(y)/2.0\n signs = np.sign(np.add(y, -half))\n zero_crossings = (signs[0:-2] != signs[1:-1])\n zero_crossings_i = np.where(zero_crossings)[0]\n hmx_left_right= [lin_interp(x, y, zero_crossings_i[0], half),\n lin_interp(x, y, zero_crossings_i[1], half)]\n HWHM=0.5*(hmx_left_right[1] - hmx_left_right[0])\n return HWHM\n\ndef BLS_func(ID,Sector,cadence,time,flux,error,N_transits,minP,oversampling_factor,duration_grid_step,path,for_injections=False):\n from astropy.timeseries import BoxLeastSquares\n #for reporting BLS model's planet radius\n R_earth = 6.378*10.0**8.0 #cm\n R_sun = 6.955*10.0**10.0 #cm\n ###\n ###\n # calculate CDPP (in ppm per sqrt hour) to save later\n cdpp = CDPP(time,flux,error,'median','ppm',binsize=(1.0/24.0)) \n time_span = np.nanmax(time) - np.nanmin(time)\n # \n from transitleastsquares import period_grid,duration_grid, catalog_info\n try:\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:\n clock.sleep(5) #pause 5 seconds then try again\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n\n # First lets make the grid\n LCduration = np.nanmax(time) - np.nanmin(time) #duration of light curve\n\n # lets pick min/max of orbital period grid\n maxP = LCduration/N_transits #orbital periods for grid\n\n periods = period_grid(R_star=R_star, M_star=M_star, time_span=LCduration, period_min=minP, period_max=maxP,oversampling_factor=oversampling_factor)\n durations= duration_grid(periods,shortest=None,log_step=duration_grid_step) \n #shortest is unused in source code definition (why is it there?) 
\n ###\n if np.isfinite(R_star)==True and np.isfinite(M_star)==True:\n\n #start BLS search\n bls = BoxLeastSquares(time, flux, error) \n bls_power = bls.power(periods, durations)\n\n BLS_SDE = (bls_power.power - np.nanmean(bls_power.power))/np.nanstd(bls_power.power)\n \n #BLS results\n index = np.argmax(BLS_SDE) #finds strongest peak in BLS power spectrum\n BLS_Period=bls_power.period[index]\n try:\n BLS_Period_err = HWHM(bls_power.period[::-1],BLS_SDE)\n except (IndexError, ValueError) as E:\n BLS_Period_err = np.nan \n BLS_T0=bls_power.transit_time[index]\n BLS_Depth=bls_power.depth[index]\n BLS_Depth_err=bls_power.depth_err[index]\n BLS_Dur = bls_power.duration[index]\n BLS_sde = BLS_SDE[index]\n \n #count transit times based on T0 and P\n from transitleastsquares.stats import all_transit_times\n BLS_transit_times = all_transit_times(BLS_T0, time, BLS_Period)\n BLS_transit_count = len(BLS_transit_times)\n \n R_p = np.sqrt(BLS_Depth)*R_star*R_sun/R_earth\n R_star_err = R_star_min\n R_p_err=R_p * np.sqrt((R_star_err/R_star)**2 + (BLS_Depth_err/BLS_Depth)**2)\n BLS_model=bls.model(time, BLS_Period, BLS_Dur, BLS_T0) \n \n #saving results\n if for_injections==False:\n Path=path+'Sector_'+str(Sector)+'/'\n if for_injections==True:\n Path=path\n if cadence=='long':\n saveReportpath = Path+'FFI_BLS_Report/'\n if cadence=='short': \n saveReportpath = Path+'TPF_BLS_Report/'\n ###\n if os.path.exists(saveReportpath)==True:\n # print('folder exists, moving on...') #feel free to uncomment these out\n pass\n else:\n # print('making directory') \n os.makedirs(saveReportpath)\n ###\n import pandas as pd\n \n BLSdf = pd.DataFrame({\"BLS Periods\":bls_power.period, \"BLS Power\": bls_power.power, \"BLS SDE\":BLS_SDE})\n \n #check BLS Power Spectrum for oddities (like infinite peaks/depths)\n if np.nanmin(bls_power.power)==-np.inf or np.nanmax(bls_power.power)==np.inf:\n print('')\n print('PROBLEM with BLS: infinite values in Power. 
Check Light Curve for weird effects.')\n return None,None,None\n \n BLSmodeldf = pd.DataFrame({\"Time\":time , \"Model\": BLS_model})\n \n# from NEMESIS_pipeline import Make_dirs, Get_stellar_params, gethdu, momentumdump_check\n savefigpath1 = Path+'FFI_PLD_plots/'\n savelcpath1 = Path+'FFI_PLD_LCs/'\n savefigpath2 = Path+'TPF_PLD_plots/'\n savelcpath2 = Path+'TPF_PLD_LCs/' \n downloadpath = Path+'cache/'\n\n if cadence=='long':\n savefigpath=savefigpath1\n savelcpath=savelcpath1\n downloadpath=downloadpath\n\n if cadence=='short': \n savefigpath=savefigpath2\n savelcpath=savelcpath2\n downloadpath=downloadpath\n\n Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist = Get_stellar_params(ID,downloadpath)\n mdumps,t_0,t_1 = momentumdump_check(Sector)\n #calculate flux contamination of nearby stars within 3 TESS pixels\n flux_contamination_ratio = calc_flux_contamination(ID)\n ### \n ### \n ### \n stats = bls.compute_stats(BLS_Period,BLS_Dur,BLS_T0)\n BLS_Depth_odd,BLS_Depth_odd_err = stats['depth_odd'][0],stats['depth_odd'][1]\n BLS_Depth_even,BLS_Depth_even_err = stats['depth_even'][0],stats['depth_even'][1]\n \n odd_even_difference = abs(BLS_Depth_odd - BLS_Depth_even)\n odd_even_std_sum = BLS_Depth_odd_err + BLS_Depth_even_err\n odd_even_mismatch = odd_even_difference / odd_even_std_sum\n \n BLSbestfitdf = pd.DataFrame({\"BLS Period [d]\":BLS_Period, \"BLS Period Error\":BLS_Period_err,\"BLS TC [BTJD]\": BLS_T0, \"BLS depth [ppt]\":(BLS_Depth)*1000,\"BLS depth Error\":BLS_Depth_err,\"BLS SDE\":BLS_sde, \"BLS FAP\":np.nan,\"BLS Dur [hrs]\":BLS_Dur*24, \"BLS Odd Even Mismatch\":odd_even_mismatch,\"BLS Transit Count\":BLS_transit_count, \"Planet Radius [RE]\":R_p,\"Planet Radius Error\":R_p_err,\"CDPP [ppm/sqrt hr]\": cdpp, 'Time Span [d]':time_span, 'Stellar Radius [RS]':R_star, \"Stellar Mass [MS]\":M_star,\"Teff [K]\":Teff, \"Flux Contamination Ratio\":flux_contamination_ratio,\"Vmag\":Vmag,\"TESSmag\":Tmag,\"rmag\":rmag,\"imag\":imag,\"zmag\":zmag,\"Jmag\":Jmag,\"Hmag\":Hmag, \"Kmag\":Kmag,\"Momentum Dump Rate [d]\":mdumps,\"RA\":ra, \"DEC\":dec,\"logg\":logg,\"rho [g/ccm]\":rho,\"dist [pc]\":dist}, index=[0])\n \n BLSdf.to_csv(saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_BLS.txt\",index=False)\n BLSmodeldf.to_csv(saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_BLS_model.txt\",index=False)\n BLSbestfitdf.to_csv(saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_BLS_bestfit.txt\",index=False)\n \n return BLSdf, BLSmodeldf, BLSbestfitdf\n \n else:\n print(\" \")\n print(\"NaNs in mass or radius\")\n print(\"Stellar Mass: \",M_star,\" Radius: \", R_star)\n print(\" \")\n pass\n \n \ndef TransitSearch(method, ID,Sector,cadence,input_LC,N_transits,minP,oversampling_factor,duration_grid_step,path,for_injections=False): \n time = np.array(input_LC['Time'])\n flux = np.array(input_LC['Flux'])\n error= np.array(input_LC['Error'])\n ###\n if method=='TLS':\n TLSdf, TLSmodeldf, TLSbestfitdf,TLSTCsdf = TLS_func(ID,Sector,cadence,time,flux,error,N_transits,minP,oversampling_factor,duration_grid_step,path,for_injections)\n if isinstance(TLSdf, type(None)):\n return None,None,None\n else:\n PowerSpectrum_df = TLSdf\n TransitModel_df = TLSmodeldf\n TransitParams_df = TLSbestfitdf\n ###\n if method=='BLS': \n BLSdf, BLSmodeldf, BLSbestfitdf = BLS_func(ID,Sector,cadence,time,flux,error,N_transits,minP,oversampling_factor,duration_grid_step,path,for_injections)\n \n if isinstance(BLSdf, type(None)):\n return None,None,None\n else:\n PowerSpectrum_df = 
BLSdf\n TransitModel_df = BLSmodeldf\n TransitParams_df = BLSbestfitdf\n \n return PowerSpectrum_df, TransitModel_df, TransitParams_df\n\n\n\n\n\n\ndef Transit_plot(ID,Sector,cadence,method,input_LC, PowerSpectrum_df,TransitModel_df, TransitParams_df, path, for_injections=False):\n import os\n if for_injections==False:\n Path=path+'Sector_'+str(Sector)+'/'\n if for_injections==True:\n Path=path\n \n if cadence=='long':\n if method=='BLS':\n saveReportpath = Path+'FFI_BLS_Report/'\n if method=='TLS':\n saveReportpath = Path+'FFI_TLS_Report/'\n if cadence=='short': \n if method=='BLS':\n saveReportpath = Path+'TPF_BLS_Report/'\n if method=='TLS':\n saveReportpath = Path+'TPF_TLS_Report/'\n ###\n if os.path.exists(saveReportpath)==True:\n# print('folder exists, moving on...') #feel free to uncomment these out\n pass\n else:\n# print('making directory') \n os.makedirs(saveReportpath)\n \n #getting stellar parameters from TIC\n from transitleastsquares import catalog_info\n try:\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:\n clock.sleep(5) #pause 5 seconds then try again\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n ### \n fs = 9\n spacing = 0.02\n #for reporting TLS model's planet radius\n R_earth = 6.378*10.0**8.0 #cm\n R_sun = 6.955*10.0**10.0 #cm \n ###\n time = np.array(input_LC['Time'])\n flux = np.array(input_LC['Flux'])\n error = np.array(input_LC['Error']) \n \n #recalculate window size used for smoothing\n LCDur=(np.nanmax(time) - np.nanmin(time))\n maxP = LCDur/2\n R_planet_RE = 1\n \n # we want to keep the longest transit for an Earth-like planet for a single sector of data\n # using stellar parameters to determine transit duration\n window_size = 3*Tdur(maxP, R_star,M_star, R_planet_RE)\n \n ###\n if method=='BLS':\n P = TransitParams_df['BLS Period [d]'].item()\n T0= TransitParams_df['BLS TC [BTJD]'].item()\n Dur= TransitParams_df['BLS Dur [hrs]'].item()\n R_p= TransitParams_df['Planet Radius [RE]'].item() \n \n Periods = np.array(PowerSpectrum_df['BLS Periods'])\n Power = np.array(PowerSpectrum_df['BLS SDE'])\n \n ModelT=np.array(TransitModel_df['Time']) \n ModelF=np.array(TransitModel_df['Model']) \n label1='BLS Period: '+str( np.round(P,4) )+\" days; Transit Duration: \"+str(np.round(Dur,4))+\" hours\"\n label2='BLS Model'\n \n savefile=saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_BLS.png\"\n if method=='TLS':\n P = TransitParams_df['TLS Period [d]'].item()\n T0= TransitParams_df['TLS TC [BTJD]'].item()\n Dur= TransitParams_df['TLS Dur [hrs]'].item()\n R_p= TransitParams_df['Planet Radius [RE]'].item() \n \n Periods = np.array(PowerSpectrum_df['TLS Periods'])\n Power = np.array(PowerSpectrum_df['TLS SDE'])\n \n ModelT=np.array(TransitModel_df['Time']) \n ModelF=np.array(TransitModel_df['Model']) \n label1='TLS Period: '+str( np.round(P,4) )+\" days; Transit Duration: \"+str(np.round(Dur,4))+\" hours\"\n label2='TLS Model'\n savefile=saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_TLS.png\"\n ###\n gs1 = gridspec.GridSpec(2, 2)\n gs1.update(left=0.65, right=1.25, wspace=0.25,hspace=0.5)\n ###\n ###\n fig = plt.figure(figsize=(10,6)) \n ax1 = fig.add_subplot(gs1[0:1, 0:2])\n ###\n plt.gca().get_xaxis().get_major_formatter().set_scientific(False)\n plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)\n ax1.axvline(x=P,color='r',label=label1) #rounding period 
to 4 decimal places\n    ax1.axvline(x=0.5*P,color='r',linestyle='--')\n    ax1.axvline(x=2.0*P,color='r',linestyle='--')\n    ###\n    mdumps,t_0,t_1 = momentumdump_check(Sector)\n    ax1.axvline(x=mdumps,color='grey',linestyle='--',label='momentum dump rate (days): '+str(mdumps))\n    ###\n    ax1.plot(Periods, Power, rasterized=True)\n    ax1.set_title("TIC "+str(ID)+" "+"Sector "+str(Sector))\n    ax1.set_xlabel("Period (days)")\n    ax1.set_xticks(np.arange(1.0, 15.0, 1.0))\n    ax1.set_xlim(np.nanmin(Periods)-0.5, np.nanmax(Periods)+0.5)\n    ###\n    ax1.set_ylabel("SDE")\n    ax1.set_ylim(np.nanmin(Power)-0.5, np.nanmax(Power)+0.5)\n    ax1.legend(loc='best',fancybox=True,framealpha=0.5)\n    ###\n    ax2 = fig.add_subplot(gs1[1:, 0:1])\n    plt.gca().get_xaxis().get_major_formatter().set_scientific(False)\n    plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)\n    ax2.plot(time, flux ,c='red',markersize=4,marker='.',linestyle='none',zorder=1,label='detrended: windowsize: '+str(np.round(window_size*24,2))+' hrs')\n    ax2.plot(ModelT,ModelF,'b.',label=label2,markersize=2.5)\n    ax2.legend(loc='upper center',ncol=3,fontsize=fs)\n    ax2.set_ylim(np.nanmin(flux)-3*np.nanstd(flux), np.nanmax(flux)+3*np.nanstd(flux))\n    ###\n    #plotting momentum dumps\n    t_0=np.nanmin(time)\n    Num_mdumps = int(np.round((np.nanmax(time) - np.nanmin(time))/mdumps,2))+1\n    print('')\n    ###\n    for N in range(Num_mdumps):\n        time_mdump1 = t_0+(N)*mdumps\n        time_mdump2 = t_1+(N+0.5)*mdumps\n        if time_mdump1 < t_1:\n            ax2.axvline(x=time_mdump1,zorder=-2)\n        if time_mdump2 < np.nanmax(time):\n            ax2.axvline(x=time_mdump2,zorder=-2)\n    ###\n    ax2.set_title("Star Radius: "+str(np.round(R_star,3))+r" $R_{\odot}$ Star Mass: "+str(np.round(M_star,3))+r" $M_{\odot}$")\n    ax2.set_xlabel("Time (BTJD)")\n    ax2.set_ylabel("Normalized Flux")\n    ###\n    #phasefold expects the epoch first: phasefold(T0, time, period, flux)\n    pf_model,ff_model = phasefold(T0,ModelT,P,ModelF)\n    pf,ff = phasefold(T0,time,P,flux)\n    ###\n    ax3 = fig.add_subplot(gs1[1:, 1:2])\n    plt.gca().get_xaxis().get_major_formatter().set_scientific(False)\n    plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)\n    ax3.plot(24*pf,ff,c='red',markersize=6,marker='.',linestyle='none',zorder=1,label='detrended: windowsize: '+str(window_size*24)+' hrs')\n    ax3.plot(24*pf_model,ff_model,'b.-',label=label2,markersize=5)\n    ax3.set_xlabel("Orbital Phase (Hours)")\n    ax3.set_ylabel("Normalized Flux")\n    ax3.set_title("Planet Radius: "+str(np.round(R_p,3))+" in Earth radii")\n    ax3.set_ylim(np.nanmin(ModelF)-25*np.nanstd(ModelF), np.nanmax(ModelF)+25*np.nanstd(ModelF))\n    if Dur < 1:\n        ax3.set_xticks(np.arange(-2,2+1,1))\n    else:\n        ax3.set_xticks(np.arange(int(-3*Dur),int(3*Dur)+int(3*Dur)/4,int(3*Dur)/4)) # hours\n    ax3.ticklabel_format(useOffset=False)\n    ###\n    if Dur> 8.0:\n        ax3.set_xlim(-3*Dur,3*Dur)\n    else:\n        ax3.set_xlim(-5,5)\n    ###\n    if Dur< 1.0:\n        ax3.set_xlim(-3*Dur,3*Dur)\n    ###\n    gs1.tight_layout(fig)\n    ###\n    plt.savefig(savefile)\n    #plt.show()\n    plt.close()\n\n\n# functions needed to produce 1 page TLS Validation Reports\n# functions needed to produce 1 page TLS Validation Reports\n# functions needed to produce 1 page TLS Validation Reports\n\n\ndef plot_odd_even_transits(LC_df, TLSbestfit_df, TLSTCs_df, TLSmodel_df, ax, fig):\n    ax.set_title('All Odd / Even Events')\n    \n    
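# Why this panel exists: a blended eclipsing binary detected at half its true\n    # period shows alternating deep and shallow eclipses, so folding the odd- and\n    # even-numbered events separately (offset vertically for display) makes a\n    # depth mismatch visible by eye. A toy example of the metric quoted in the\n    # text box (illustrative numbers only, not from a real target):\n    #\n    #   depth_odd, depth_even = 1.2, 0.8 # ppt\n    #   err_odd, err_even = 0.1, 0.1     # ppt\n    #   mismatch = abs(depth_odd - depth_even)/(err_odd + err_even) # = 2.0 sigma\n    #\n    # the summary text panel flags mismatches above 5 sigma in red.\n    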
markersize=5\n fontsize=12\n #T_C_x_position and T_C_y_position control where the text appears for time stamps of \"transit events\"\n T_C_x_position = -0.55\n T_C_y_position =0.002\n \n time = np.array(LC_df['Time'])\n flux = np.array(LC_df['Detrended Flux'])\n error = np.array(LC_df['Detrended Error'])\n \n P = TLSbestfit_df['TLS Period [d]'].item()\n T0 = TLSbestfit_df['TLS TC [BTJD]'].item()\n Dur= TLSbestfit_df['TLS Dur [hrs]'].item()\n Depth = (TLSbestfit_df['TLS depths [ppt]'].item())/1000 #in ppo now\n \n spacing = 4* (Depth)\n \n XLIM=1.5*Dur\n \n T_C_array = np.array(TLSTCs_df['TLS TCs [BTJD]'])\n Depths_array = np.array(TLSTCs_df['TLS Depths'])\n Depths_err_array = np.array(TLSTCs_df['TLS Depths Error'])\n \n Modeltime = np.array(TLSmodel_df['Time'])\n Modelflux = np.array(TLSmodel_df['Model'])\n pf_model,ff_model = phasefold(T0,Modeltime,P,Modelflux)\n \n # cutting a 1 days worth of data around individual odd/even transits\n # and appending them into odd/even arrays for comparison\n \n \n window_size = 1 #day\n EvenDepths=[]\n OddDepths=[]\n Even=[]\n Evenflux=[]\n Odd=[]\n Oddflux=[]\n for x in range(len(T_C_array)):\n if x %2 ==0: #even\n EvenDepths=np.append(EvenDepths,Depths_array[x])\n cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]\n cut_t = time[cut]\n cut_f = flux[cut]\n cut_fe = error[cut]\n if len(cut_f)<1: #in case window size is too small to cut data around\n window_size=1.5\n cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]\n cut_t = time[cut]\n cut_f = flux[cut]\n cut_fe = error[cut] \n ###\n phasefolded,foldedflux = phasefold(T_C_array[x],cut_t,P,cut_f)\n Even=np.append(Even,phasefolded)\n Evenflux=np.append(Evenflux,foldedflux)\n \n \n else: #odd\n OddDepths=np.append(OddDepths,Depths_array[x])\n cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]\n cut_t = time[cut]\n cut_f = flux[cut]\n cut_fe = error[cut]\n if len(cut_f)<1: #in case window size is too small to cut data around\n window_size=1.5\n cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]\n cut_t = time[cut]\n cut_f = flux[cut]\n cut_fe = error[cut]\n phasefolded,foldedflux = phasefold(T_C_array[x],cut_t,P,cut_f)\n Odd=np.append(Odd,phasefolded)\n Oddflux=np.append(Oddflux,foldedflux)\n \n ax.plot(24*Odd,np.array(Oddflux)+spacing,color='lightblue',marker='.',linestyle='none',markersize=markersize+1, rasterized=True,label='Odd') \n ax.plot(24*Even,np.array(Evenflux),color='dimgrey',marker='.',linestyle='none',markersize=markersize+1, rasterized=True,label='Even')\n ax.plot(24*pf_model,ff_model,'r.-',linewidth=1,markersize=2,label='TLS Model')\n ax.plot(24*pf_model,ff_model+spacing,'r.-',linewidth=1,markersize=2)\n ###\n ymax = np.nanmax(np.nanmean(ff_model)+2*spacing)\n ymin = np.nanmin(np.nanmean(ff_model)-spacing) \n\n \n ax.set_xlim(-XLIM,XLIM)\n ax.set_ylim(ymin,ymax)\n ax.set_xlabel('Phase [Hours since '+str(np.round(T0,3))+' [BTJD]')\n ax.set_ylabel('Normalized Flux + Offset')\n \n \n #get odd/even metrics from TLS\n odd_even_mismatch = (TLSbestfit_df['TLS Odd Even Mismatch'].item()) #in standard deviations\n \n tx=0.085\n ty=0.915\n ax.text(tx,ty,'N Transits: '+str(len(T_C_array))+' O/E mismatch '+str(np.round(odd_even_mismatch,3))+r' ${\\sigma}$', transform=fig.transFigure, size=fontsize-2)\n \n ax.axhline(y=1-np.nanmean(EvenDepths)/1000,color='green',linestyle='-')\n 
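# green guide lines: the one above marks the mean even-transit depth on the\n    # baseline fold; the next marks the mean odd-transit depth on the offset\n    # copy (depths are stored in ppt, hence the /1000 back to normalized flux)\n    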
ax.axhline(y=1+spacing-np.nanmean(OddDepths)/1000,color='green',linestyle='-',label='Odd/Even Mismatch')\n \n handles, labels = ax.get_legend_handles_labels()\n by_label = dict(zip(labels, handles))\n# ax.legend(by_label.values(), by_label.keys(),ncol=2,fontsize=fs-1)\n\n ax.annotate(\"Odd\", xy=( -1, np.nanmean(ff_model)+1.5*spacing ), va='top',xycoords='data', fontsize=fontsize+4,weight=\"bold\")\n ax.annotate(\"Even\", xy=(-1, np.nanmean(ff_model)-0.5*spacing ), va='top',xycoords='data', fontsize=fontsize+4,weight=\"bold\")\n\ndef plot_power_spectra(TLS_df,TLSbestfit_df, ax):\n #power spectra\n TLS_periods= TLS_df['TLS Periods']\n TLS_Power = TLS_df['TLS Power']\n \n #best fit params\n P = TLSbestfit_df['TLS Period [d]'].item()\n RP = TLSbestfit_df['Planet Radius [RE]'].item()\n Depth = TLSbestfit_df['TLS depths [ppt]'].item()\n mdumps=TLSbestfit_df['Momentum Dump Rate [d]'].item()\n \n ax.axvline(x=P,color='r')\n if 0.5*P> np.nanmin(TLS_periods):\n ax.axvline(x=0.5*P,color='r',linestyle='--')\n ###\n if 2.0*P < np.nanmax(TLS_periods):\n ax.axvline(x=2.0*P,color='r',linestyle='--')\n \n ax.plot(TLS_periods,TLS_Power, color='black', rasterized=True)\n\n ax.axvline(x=mdumps,color='grey',linestyle='--')\n ax.set_xlabel('Period [days]')\n ax.set_ylabel('TLS Power')\n ax.set_xticks(np.arange(np.nanmin(TLS_periods), np.nanmax(TLS_periods)+1, 1))\n if np.nanmax(TLS_Power)> 12:\n ax.set_yticks(np.arange(int(np.nanmin(TLS_Power)), int(np.nanmax(TLS_Power)+5), 5))\n if (np.nanmax(TLS_Power)>= 7) & (np.nanmax(TLS_Power)< 12):\n ax.set_yticks(np.arange(int(np.nanmin(TLS_Power)), int(np.nanmax(TLS_Power)+2), 2)) \n if np.nanmax(TLS_Power)< 7:\n ax.set_yticks(np.arange(int(np.nanmin(TLS_Power)), int(np.nanmax(TLS_Power)+1), 1))\n ax.set_title('TLS Power Spectrum: '+'Period '+str(np.round(P,3))+' d'+' Depth '+str(np.round(Depth,3))+' ppt'+' Planet Radius: '+str(np.round(RP,3))+' RE') \n\n \ndef fullphasefold(time,T0,period,flux,offset):\n phase= (time - T0 + offset*period) / (period) - np.floor((time - T0 + offset*period) / period)\n ind=np.argsort(phase, axis=0)\n return phase[ind],flux[ind]\n \ndef plot_phasefold_LCs(ID,Sector,LC_df,TLS_df,TLSbestfit_df,TLSTCs_df,TLSmodel_df, axa,axb,axc,axd,axe):\n #fontsize\n fs=12\n \n #plots LC, PFLC, 0.5*P PFLC, 2*P PFLC and full PFLC \n time=np.array(LC_df['Time'])\n flux=np.array(LC_df['Detrended Flux'])\n error=np.array(LC_df['Detrended Error'])\n sap_flux=np.array(LC_df['SAP Flux'])\n sap_error=np.array(LC_df['SAP Error'])\n \n \n modeltime = np.array(TLSmodel_df['Time'])\n modelflux = np.array(TLSmodel_df['Model'])\n \n T0 = TLSbestfit_df['TLS TC [BTJD]'].item()\n P = TLSbestfit_df['TLS Period [d]'].item()\n Dur=TLSbestfit_df['TLS Dur [hrs]'].item()\n Depth=TLSbestfit_df['TLS depths [ppt]'].item()/1000\n \n XLIM=3.5*Dur\n YLIM=2*Depth\n \n T_C_array = np.array(TLSTCs_df['TLS TCs [BTJD]'])\n \n #calculate full phase 0 to 1 + an offset to shift midtransit from 0 to offset\n offset=0.25\n fullphase, fullphaseflux = fullphasefold(time,T0,P,flux,offset) \n \n #calculate phase in hours since T0 for 0.5x, 1x, 2x Period\n phasefolda, phasefoldfluxa = phasefold(time,T0,P,flux)\n phasefoldb, phasefoldfluxb = phasefold(time,T0,P*0.5,flux)\n phasefoldc, phasefoldfluxc = phasefold(time,T0,P*2.0,flux)\n \n #do same for transit models\n fullphase_model, fullphaseflux_model = fullphasefold(modeltime,T0,P,modelflux,offset)\n phasefold_modela, phasefoldflux_modela = phasefold(modeltime,T0,P,modelflux)\n phasefold_modelb, phasefoldflux_modelb = 
phasefold(modeltime,T0,0.5*P,modelflux)\n phasefold_modelc, phasefoldflux_modelc = phasefold(modeltime,T0,2.0*P,modelflux)\n\n #power spectra limits for PFLCs\n TLSPmin,TLSPmax = np.nanmin(np.array(TLS_df['TLS Periods'])) , np.nanmax(np.array(TLS_df['TLS Periods']))\n \n # plot LC\n cdpp_sap = CDPP(time,sap_flux,sap_error,'median','ppm',binsize=(1.0/24.0))\n cdpp_det = CDPP(time,flux,error,'median','ppm',binsize=(1.0/24.0))\n \n axa.set_title(r'Light Curve CDPPs: SAP CDPP = '+str(np.round(cdpp_sap,1))+' $\\sigma _{ppm}$ ''hr$^{-1/2}$, Detrended CDPP ='+str(np.round(cdpp_det,1))+' $\\sigma _{ppm}$ ''hr$^{-1/2}$') \n \n mdumps,t_0,t_1 = momentumdump_check(Sector)\n t_0=np.nanmin(time) #sometimes data near beginning gets chopped based on TESS DRNs\n if Sector==31:\n t_0end = 2157.45371\n t_1end = 2169.94398\n time_mdump1 = t_0+ (t_0end - t_0)/2\n time_mdump2 = t_1+ (t_1end - t_1)/2\n axa.axvline(x=time_mdump1,zorder=-2)\n axa.axvline(x=time_mdump2,zorder=-2) \n else:\n Num_mdumps = int(np.round((np.nanmax(time)-np.nanmin(time))/mdumps,2))+1\n for N in range(Num_mdumps):\n time_mdump1 = t_0+(N)*mdumps\n time_mdump2 = t_1+(N+0.5)*mdumps \n if time_mdump1 < t_1:\n axa.axvline(x=time_mdump1,zorder=-2) \n if time_mdump2 < np.nanmax(time):\n axa.axvline(x=time_mdump2,zorder=-2)\n \n axa.plot(time,flux,'k.',markersize=3,zorder=1, rasterized=True)\n axa.plot(np.array(TLSmodel_df['Time'].to_list()),np.array(TLSmodel_df['Model'].to_list())\\\n ,'r.',markersize=1, rasterized=True)\n for x in range(len(T_C_array)):\n ### plotting 3 slightly overlapping to make it more obvious in tiny subplot window\n axa.plot(T_C_array[x], 1.0+1.5*Depth, marker=r'$\\downarrow$',color='cyan', rasterized=True)\n axa.plot(T_C_array[x], 1.0+1.6*Depth, marker=r'$\\downarrow$',color='cyan', rasterized=True)\n axa.plot(T_C_array[x], 1.0+1.7*Depth, marker=r'$\\downarrow$',color='cyan', rasterized=True) \n ###\n# tx=0.39\n# ty=0.8\n# axa.text(tx,ty,'Momentum Dump Rate: '+str(mdumps)+' days', transform=fig.transFigure, size=fs-2)\n axa.set_xlabel('Time [BTJD]')\n axa.set_ylabel('Norm. Flux')\n \n \n # plot PFLC\n axb.set_title('Phase Folded Light Curve',fontsize=fs-1) \n axb.plot(24*phasefolda, phasefoldfluxa,'k.',markersize=3,zorder=0, rasterized=True)\n axb.plot(24*phasefold_modela, phasefoldflux_modela,'r.-',markersize=2,zorder=1, rasterized=True)\n axb.set_xlabel(r'Phase [Hours since '+str(np.round(T0,3))+' [BTJD]')\n axb.set_ylabel('Norm. Flux')\n \n # plot full PFLC\n axc.set_title(\"Full Phase Folded Light Curve\",fontsize = fs)\n axc.plot(fullphase, fullphaseflux,'k.',markersize=3,zorder=0, rasterized=True) \n axc.plot(fullphase_model, fullphaseflux_model,'r.-',markersize=2,zorder=1, rasterized=True) \n axc.set_xlabel('Phase + 0.25')\n axc.set_ylabel('Norm. Flux')\n \n # plot PFLC with 0.5x P\n axd.set_title('0.5x Period = '+(str(np.round(0.5*P,3)))+' days')\n axd.plot(24*phasefoldb, phasefoldfluxb,'k.',markersize=3,zorder=0, rasterized=True)\n #models never looks good at 1/2x \n # axd.plot(24*phasefold_modelb, phasefoldflux_modelb,'r.-',markersize=2,zorder=1, rasterized=True)\n #models never looks good at 1/2x \n axd.set_xlabel(r'Phase [Hours since '+str(np.round(T0,3))+' [BTJD]')\n axd.set_ylabel('Norm. 
Flux')\n \n # plot PFLC with 2x P\n axe.set_title('2x Period = '+(str(np.round(2*P,3)))+' days')\n axe.plot(24*phasefoldc, phasefoldfluxc,'k.',markersize=3,zorder=0, rasterized=True)\n axe.plot(24*phasefold_modelc, phasefoldflux_modelc,'r.-',markersize=2,zorder=1, rasterized=True)\n axe.set_xlabel(r'Phase [Hours since '+str(np.round(T0,3))+' [BTJD]')\n axe.set_ylabel('Norm. Flux')\n \n \n axc.set_xticks(np.arange(0.0, 1+0.25, 0.25)) \n if XLIM < 8:\n axb.set_xticks(np.arange(int(-XLIM), int(XLIM)+1, 1.0)) \n axd.set_xticks(np.arange(int(-XLIM), int(XLIM)+1, 1.0))\n axe.set_xticks(np.arange(int(-XLIM), int(XLIM)+1, 1.0))\n if XLIM > 8:\n axb.set_xticks(np.arange(int(-XLIM), int(XLIM)+2, 2.0))\n axd.set_xticks(np.arange(int(-XLIM), int(XLIM)+2, 2.0))\n axe.set_xticks(np.arange(int(-XLIM), int(XLIM)+2, 2.0))\n \n \n axb.set_xlim(-XLIM,XLIM)\n axc.set_xlim(-0.01,1.01)\n axd.set_xlim(-XLIM,XLIM)\n axe.set_xlim(-XLIM,XLIM)\n \n axa.set_ylim(1-YLIM,1+YLIM)\n axb.set_ylim(1-YLIM,1+YLIM)\n axc.set_ylim(1-YLIM,1+YLIM)\n axd.set_ylim(1-YLIM,1+YLIM)\n axe.set_ylim(1-YLIM,1+YLIM)\n #turn off exponential notiation in axes\n axa.ticklabel_format(useOffset=False)\n axb.ticklabel_format(useOffset=False)\n axc.ticklabel_format(useOffset=False)\n axd.ticklabel_format(useOffset=False)\n axe.ticklabel_format(useOffset=False)\n\n\n \ndef Get_FFI(ID,Sector,cadence,path,use_SPOC_aperture='no',for_injections=False):\n #Step 0: Creating directories to save figures and data\n import pandas as pd \n verbose=False\n if for_injections==False:\n Path=path+'Sector_'+str(Sector)+'/'\n if for_injections==True:\n Path=path\n if cadence=='long':\n saveReportpath = Path+'FFI_TLS_Report/'\n savelcpath= Path+'FFI_PLD_LCs/'\n downloadpath = Path+'cache/'\n if cadence=='short': \n saveReportpath = Path+'TPF_TLS_Report/'\n savelcpath= Path+'TPF_PLD_LCs/'\n downloadpath = Path+'cache/'\n try:\n bkg_mask = readNDarr(savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_bkg_mask\")\n pix_mask = readNDarr(savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_pix_mask\")\n images = readNDarr(savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_image_fluxes\")\n median_image = np.nanmedian(images, axis=0)\n \n try:\n hdu,CCD,Camera,quality_mask,reference_pixel = gethdu(ID,Sector,cutoutsize=11,cadence=cadence,\\\n minimum_photon_counts=1,verbose=True,\\\n downloadpath=downloadpath)\n except TypeError as TE:\n print(TE)\n import time as clock\n os.system('rm -r ~/.astropy/cache/download/py3/lock') #clear any locks that might be in cache\n clock.sleep(10) #wait 10 seconds and try again\n hdu,CCD,Camera,quality_mask,reference_pixel = gethdu(ID,Sector,cutoutsize=11,cadence=cadence,\\\n minimum_photon_counts=1,verbose=True,\\\n downloadpath=downloadpath) \n except FileNotFoundError as FNFE:\n print('')\n print(FNFE)\n print('recreating cutouts, aperture and background masks with default settings')\n print(' ')\n #Step 1: Download FFI Cutout from MAST\n # sometimes MAST/Astropy has issues, if it fails try again\n # if it got to this point, the FFI definitely exists!\n try:\n hdu,CCD,Camera,quality_mask,reference_pixel = gethdu(ID,Sector,cutoutsize=11,cadence=cadence,\\\n minimum_photon_counts=1,verbose=True,\\\n downloadpath=downloadpath)\n except TypeError as TE:\n print(TE)\n import time as clock\n os.system('rm -r ~/.astropy/cache/download/py3/lock') #clear any locks that might be in cache\n clock.sleep(10) #wait 10 seconds and try again\n hdu,CCD,Camera,quality_mask,reference_pixel = 
gethdu(ID,Sector,cutoutsize=11,cadence=cadence,\\\n minimum_photon_counts=1,verbose=True,\\\n downloadpath=downloadpath)\n print('') \n #step 2: get aperture and background masks\n bkg_mask, pix_mask ,flux, median_image, SAP_LC = SAP(ID=ID,Sector=Sector,cutoutsize=11,hdu=hdu,\\\n quality_mask=quality_mask,threshold=7.5,cadence=cadence,\\\n reference_pixel=reference_pixel,verbose=False,\\\n savelcpath=savelcpath,use_SPOC_aperture='no') \n #resave pkl data\n saveNDarr(pix_mask,savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_pix_mask\")\n saveNDarr(bkg_mask,savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_bkg_mask\")\n saveNDarr(flux,savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_image_fluxes\") \n ###\n #Step 3: Get information on target star and apply some basic selection cuts\n try:\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:\n clock.sleep(5) #pause 5 seconds then try again\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID) \n ###\n ###\n #Get more stellar params\n ###\n Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist = Get_stellar_params(ID,downloadpath)\n ###\n CCD=hdu[0].header['CCD']\n Camera=hdu[0].header['Camera'] \n wcs = WCS(hdu[2].header)\n return median_image, hdu, wcs, pix_mask, bkg_mask, Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist,CCD,Camera \n \n \ndef plot_image(ID,Sector,cadence,path,ax_placement,fig,fs,for_injections=False):\n if for_injections==False:\n Path=path+'Sector_'+str(Sector)+'/'\n if for_injections==True:\n Path=path\n if cadence=='long':\n saveReportpath = Path+'FFI_TLS_Report/'\n savelcpath= Path+'FFI_PLD_LCs/'\n downloadpath = Path+'cache/'\n if cadence=='short': \n saveReportpath = Path+'TPF_TLS_Report/'\n savelcpath= Path+'TPF_PLD_LCs/'\n downloadpath = Path+'cache/'\n \n #get image data and stellar params\n median_image, hdu,wcs, pix_mask, bkg_mask,Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist,CCD,Camera = Get_FFI(ID,Sector,cadence,path,for_injections=for_injections) \n \n ax = fig.add_subplot(ax_placement,projection=wcs)\n if cadence=='short':\n x=hdu[1].header['1CRPX4']-1\n y=hdu[1].header['2CRPX4']-1\n ax.set_title(\"TPF Cutout\",fontsize = fs)\n if cadence=='long':\n x=hdu[1].header['1CRPX4']\n y=hdu[1].header['2CRPX4']\n ax.set_title(\"FFI Cutout\",fontsize = fs)\n reference_pixel=[x,y] \n axes=[ax]\n plot_cutouts(ID,Sector,cadence,hdu,pix_mask,bkg_mask,reference_pixel,fig,axes,savelcpath,downloadpath,do_colorbar='no')\n \n \n# Let's work on getting DSS images along with our FFI cutouts\ndef getDSS(ID,cutoutsize,downloadpath):\n\n # astroquery\n from astroquery.mast import Tesscut\n from astroquery.mast import Catalogs\n\n # Astropy\n import astropy.units as u\n from astropy.io import fits\n from astropy.wcs import WCS\n from astropy.coordinates import SkyCoord\n from astropy.visualization import simple_norm\n\n from reproject import reproject_interp\n import socket \n import urllib\n import time as clock\n import requests\n \n from astroplan import FixedTarget\n# from astroplan.plots import plot_finder_image\n from astroquery.skyview import SkyView\n from astroplan import download_IERS_A\n \n # changing cache directories\n Tesscut.cache_location=downloadpath\n Catalogs.cache_location=downloadpath\n SkyView.cache_location=downloadpath\n \n starName=\"TIC \"+str(ID)\n degrees = 
21/3600 #21 arcsec to degrees\n radSearch = degrees # angular radius in degrees\n catalogData = Catalogs.query_object(starName, radius = radSearch, catalog = \"TIC\")\n\n #checking to see if target is correct in catalog and not nearby stars\n for x in range(len(catalogData['ID'])):\n if int(catalogData['ID'][x])==ID:\n\n ra = catalogData[x]['ra']\n dec = catalogData[x]['dec']\n Tmag = catalogData[x]['Tmag']\n Teff = catalogData[x]['Teff']\n Vmag = catalogData[x]['Vmag'] \n coord = SkyCoord(ra, dec, unit = \"deg\")\n try:\n download_IERS_A()\n except (socket.timeout,FileNotFoundError,RuntimeError) as STO:\n print('')\n print('DSS Request Timeout(?)')\n print(STO)\n print('trying again')\n clock.sleep(10)\n #clear any potential locks in cache\n os.system('rm -r ~/.astropy/cache/download/py3/lock') \n try:\n download_IERS_A()\n except (socket.timeout,FileNotFoundError,RuntimeError) as STO:\n print('DSS Request Timeout Again(?)')\n print('...oh well?')\n #clear any potential locks in cache\n os.system('rm -r ~/.astropy/cache/download/py3/lock') \n pass\n except RuntimeError as RE:\n print(RE)\n print('')\n #os.system('rm -r ~/.astropy/cache/download/py3/lock') \n clock.sleep(10)\n try: \n download_IERS_A()\n except socket.timeout as STO:\n print('DSS Request Timeout Again')\n print('...oh well?')\n pass\n survey = 'DSS2 Red'\n target_coord = SkyCoord(ra=ra*u.deg, dec=dec*u.deg)\n sizepix=(cutoutsize)*21*u.arcsec\n fov_radius = (cutoutsize)*21*u.arcsec\n grid=False\n \n try:\n target = FixedTarget(coord=target_coord, name=\"Survey = {}\".format(survey))\n except: # if DSS2 Red is not available, download the DSS field of view image instead\n survey = 'DSS'\n target = FixedTarget(coord=target_coord, name=\"Survey = {}\".format(survey))\n\n coord = target if not hasattr(target, 'coord') else target.coord\n position = coord.icrs\n coordinates = 'icrs'\n target_name = None if isinstance(target, SkyCoord) else target.name\n\n dss_pixel_scale=1.7\n tess_pixel_scale=21\n \n fficutout=cutoutsize*tess_pixel_scale\n \n #translate to dss pixel scale\n npixels_for_dss=int(fficutout*dss_pixel_scale) \n try:\n hdu = SkyView.get_images(position=position, coordinates=coordinates,\n survey=survey, radius=fov_radius, pixels=npixels_for_dss, grid=grid)[0][0]\n wcs = WCS(hdu.header)\n return hdu,hdu.data,wcs\n except (urllib.error.HTTPError,RuntimeError) as URLE:\n print(' ')\n print(URLE)\n print('problem with getting DSS image')\n print(' ')\n return None,None,None\n except (requests.exceptions.ReadTimeout, astroquery.exceptions.TimeoutError,socket.timeout) as STO:\n print('')\n print('DSS Request Timeout')\n print(STO)\n print('trying again')\n clock.sleep(60)\n try:\n hdu = SkyView.get_images(position=position, coordinates=coordinates,\n survey=survey, radius=fov_radius, pixels=npixels_for_dss, grid=grid)[0][0]\n wcs = WCS(hdu.header)\n return hdu,hdu.data,wcs\n except urllib.error.HTTPError as URLE:\n print(' ')\n print(URLE)\n print('problem with getting DSS image')\n print(' ')\n return None,None,None\n except (requests.exceptions.ReadTimeout, astroquery.exceptions.TimeoutError,socket.timeout):\n return None,None,None\n\ndef plot_centroids_in_phase(LC_df,TLSbestfit_df,ax):\n fs=12\n ax.set_title(\"Centroid Motion\",fontsize = fs)\n \n centx= np.array(LC_df['Centroid X Positions'])\n centy =np.array(LC_df['Centroid Y Positions'])\n time = np.array(LC_df['Time'])\n\n centx-=np.median(centx)\n centy-=np.median(centy)\n \n sigmalimx = 5*np.nanstd(centx)\n sigmalimy = 5*np.nanstd(centy)\n \n\n T0 
=TLSbestfit_df['TLS TC [BTJD]'].item()\n P =TLSbestfit_df['TLS Period [d]'].item()\n Dur=TLSbestfit_df['TLS Dur [hrs]'].item()\n XLIM=3.5*Dur\n \n pfx,xx = phasefold(T0, time,P,centx)\n pfy,yy = phasefold(T0, time,P,centy)\n \n axb = ax.twinx() #make 2nd y axis for Y centroid positions\n \n ax.plot(24*pfx,xx,'k.')\n axb.plot(24*pfy,yy,'r.')\n ax.set_xlim(-XLIM,XLIM)\n ax.set_ylabel('Delta X')\n axb.set_ylabel('Delta Y',rotation=270)\n axb.yaxis.label.set_color('red')\n axb.tick_params(axis='y', colors='red')\n ax.set_xlabel('Phase [Hours since '+str(np.round(T0,3))+' [BTJD]')\n ax.axhline(y=sigmalimx,color='green',linestyle='--')\n ax.axhline(y=-sigmalimx,color='green',linestyle='--')\n ax.axhline(y=sigmalimy,color='cyan',linestyle='--')\n ax.axhline(y=-sigmalimy,color='cyan',linestyle='--')\n \ndef plot_dss_orientation(ax,ID,downloadpath,cutoutsize=11,do_DSS_plot=True):\n fs=12\n ax.set_title(\"DSS\",fontsize = fs)\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_yticks([])\n ax.set_xticks([])\n if do_DSS_plot==True:\n dsshdu,dss,wcs=getDSS(ID,cutoutsize,downloadpath)\n if do_DSS_plot==False:\n #for transit injections don't do this\n print('NOT doing getDSS for injections (takes too long...)')\n #dsshdu,dss,wcs=getDSS(ID,cutoutsize,downloadpath)\n #for transit injections don't do this\n dsshdu,dss,wcs = None, None, None\n \n if isinstance(dss, type(None)):\n print('no DSS image, see above output')\n ax.text(0.5, 0.5, 'No DSS Image', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)\n else: \n ax.imshow(dss,cmap=plt.cm.Greys)\n ###\n northisup=True\n eastisright=False #default?\n eastisright=True #force it to be true!\n\n lwr = 2.5\n cr = 'firebrick'\n arrowkwargs = {'width':0.5, 'headwidth':4, 'shrink':0.05, 'color':cr, 'alpha':0.5}\n\n if northisup==True:\n if eastisright==True:\n ax.invert_xaxis()\n ax.invert_yaxis() #<---is this right?\n shift=0.025\n ax.annotate('', xy=(0.01+shift, 0.25), xytext=(0.01+shift, 0.05), xycoords=\"axes fraction\", textcoords=\"axes fraction\", arrowprops=arrowkwargs)\n ax.annotate('N', xy=(0.01+shift, 0.255), xycoords=\"axes fraction\", color=cr)\n ax.annotate('', xy=(0.2+shift, 0.060), xytext=(0.+shift, 0.060), xycoords=\"axes fraction\", textcoords=\"axes fraction\", arrowprops=arrowkwargs)\n ax.annotate('E', xy=(0.22+shift, 0.022), xycoords=\"axes fraction\", color=cr)\n\n else:\n ax.invert_yaxis()\n ax.annotate('', xy=(0.95, 0.25), xytext=(0.95, 0.05), xycoords=\"axes fraction\", textcoords=\"axes fraction\", arrowprops=arrowkwargs)\n ax.annotate('N', xy=(0.94, 0.255), xycoords=\"axes fraction\", color=cr)\n ax.annotate('', xy=(0.76, 0.060), xytext=(0.96, 0.060), xycoords=\"axes fraction\", textcoords=\"axes fraction\", arrowprops=arrowkwargs)\n ax.annotate('E', xy=(0.73, 0.022), xycoords=\"axes fraction\", color=cr)\n\n \n \n\ndef plot_text(ID,Sector,TLSbestfit_df,TLSTCs_df,EDI_results,fig):\n fs=12\n star_header='Stellar Parameters for TIC '+str(ID) \n TLStxt_header='TLS Results'\n startxt_array=['Stellar Mass [MS]','Stellar Radius [RS]','Teff [K]', 'Vmag', 'TESSmag', 'Jmag', 'Hmag', 'Kmag','dist [pc]','logg','RA', 'rho [g/ccm]','DEC']\n TLStxt_array=['TLS Period [d]', 'TLS TC [BTJD]', 'TLS Dur [hrs]','TLS depths [ppt]','Planet Radius [RE]','TLS SDE', 'TLS Odd Even Mismatch','TLS FAP']\n \n \n vertspacing=0.01\n horispacing=1.1\n ###\n fontx=0.6275\n fonty=0.325\n #star stuff\n N=fonty-0.025\n fig.text(fontx+0.01,N,star_header, transform=fig.transFigure, size=fs+4)\n N=5\n FS=fs-2\n for x in 
range(0,len(startxt_array)):\n if x==(len(startxt_array)-1):\n fig.text(fontx+horispacing*0.175,fonty-(N+x)*vertspacing,\"Sector : \"+str(Sector), transform=fig.transFigure, size=FS)\n #left\n if x % 2 == 0:\n if startxt_array[x]=='rho [g/ccm]':\n text=r'${\\rho}$ [g/cm$^{3}$]'\n fig.text(fontx,fonty-(N+x)*vertspacing,text+\" : \"+str(np.round(TLSbestfit_df[startxt_array[x]].item(),3)), size=FS)\n else:\n fig.text(fontx,fonty-(N+x)*vertspacing,startxt_array[x]+\" : \"+str(np.round(TLSbestfit_df[startxt_array[x]].item(),3)), size=FS)\n else: #right\n if startxt_array[x]=='rho [g/ccm]':\n text=r'${\\rho}$ [g/cm$^{3}$]'\n fig.text(fontx+horispacing*0.175,fonty-(N-1+x)*vertspacing,text+\" : \"+str(np.round(TLSbestfit_df[startxt_array[x]].item(),3)), size=FS)\n else:\n fig.text(fontx+horispacing*0.175,fonty-(N-1+x)*vertspacing,startxt_array[x]+\" : \"+str(np.round(TLSbestfit_df[startxt_array[x]].item(),3)), size=FS)\n ###\n #TLS stuff\n N=N+len(startxt_array)+2\n fig.text(fontx+horispacing*0.175/2,fonty-N*vertspacing,TLStxt_header, transform=fig.transFigure, size=fs+4)\n N=N+3\n for x in range(0,len(TLStxt_array)):\n if x % 2 == 0: #left\n if TLStxt_array[x]=='TLS FAP':\n text = str(TLSbestfit_df['TLS FAP'].item())#\"\n text = \"{:.2e}\".format(TLSbestfit_df['TLS FAP'].item())\n fig.text(fontx,fonty-(N+x)*vertspacing,str(TLStxt_array[x])+\" : \"+text, transform=fig.transFigure, size=FS,color='black')\n elif TLStxt_array[x]=='TLS Odd Even Mismatch':\n text='Odd/Even Mismatch'\n if TLSbestfit_df[TLStxt_array[x]].item() > 5:\n fig.text(fontx,fonty-(N+x)*vertspacing,text+\" : \"+str(np.round(TLSbestfit_df[TLStxt_array[x]].item(),3))+r' ${\\sigma}$', transform=fig.transFigure, size=FS,color='red')\n else:\n fig.text(fontx,fonty-(N+x)*vertspacing,text+\" : \"+str(np.round(TLSbestfit_df[TLStxt_array[x]].item(),3))+r' ${\\sigma}$', transform=fig.transFigure, size=FS)\n elif TLStxt_array[x]=='TLS depths [ppt]':\n text='TLS Depth [ppt]'\n fig.text(fontx,fonty-(N+x)*vertspacing,text+\" : \"+str(np.round(TLSbestfit_df[TLStxt_array[x]].item(),3)), transform=fig.transFigure, size=FS) \n else:\n fig.text(fontx,fonty-(N+x)*vertspacing,TLStxt_array[x]+\" : \"+str(np.round(TLSbestfit_df[TLStxt_array[x]].item(),3)), transform=fig.transFigure, size=FS)\n else: #right\n if TLStxt_array[x]=='TLS FAP':\n text = str(TLSbestfit_df['TLS FAP'].item())#\"{:.2e}\".format(TLSbestfit_df['TLS FAP'].item())\n text = \"{:.2e}\".format(TLSbestfit_df['TLS FAP'].item()) \n fig.text(fontx+horispacing*0.175,fonty-(N-1+x)*vertspacing,str(TLStxt_array[x])+\" : \"+text, transform=fig.transFigure, size=FS,color='black')\n elif TLStxt_array[x]=='TLS Odd Even Mismatch':\n text='Odd/Even Mismatch'\n if TLSbestfit_df[TLStxt_array[x]].item() > 5:\n fig.text(fontx+horispacing*0.175,fonty-(N-1+x)*vertspacing,text+\" : \"+str(np.round(TLSbestfit_df[TLStxt_array[x]].item(),3))+r' ${\\sigma}$', transform=fig.transFigure, size=FS,color='red')\n else:\n fig.text(fontx+horispacing*0.175,fonty-(N-1+x)*vertspacing,text+\" : \"+str(np.round(TLSbestfit_df[TLStxt_array[x]].item(),3))+r' ${\\sigma}$', transform=fig.transFigure, size=FS)\n elif TLStxt_array[x]=='TLS depths [ppt]':\n text='TLS Depth [ppt]'\n fig.text(fontx+horispacing*0.175,fonty-(N-1+x)*vertspacing,text+\" : \"+str(np.round(TLSbestfit_df[TLStxt_array[x]].item(),3)), transform=fig.transFigure, size=FS) \n else:\n fig.text(fontx+horispacing*0.175,fonty-(N-1+x)*vertspacing,TLStxt_array[x]+\" : \"+str(np.round(TLSbestfit_df[TLStxt_array[x]].item(),3)), transform=fig.transFigure, size=FS)\n \n 
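# --- Editor's sketch (hedged, illustrative only) -------------------------
# The two-column fig.text layout above repeats the same offset arithmetic
# in every if/else branch. The helper below is a minimal, self-contained
# sketch of that placement idea; the function name and default offsets are
# assumptions for illustration and are not part of the NEMESIS pipeline.
import matplotlib.pyplot as plt
import numpy as np

def place_two_column_text(fig, pairs, x0=0.6275, y0=0.30,
                          dy=0.01, dx=0.1925, fontsize=10):
    """Write label/value pairs in two columns of figure-fraction coords."""
    for i, (label, value) in enumerate(pairs):
        row, col = divmod(i, 2)  # two entries per row: col 0 left, col 1 right
        fig.text(x0 + col * dx, y0 - row * dy,
                 label + " : " + str(np.round(value, 3)), size=fontsize)

# usage sketch
fig = plt.figure()
place_two_column_text(fig, [("Teff [K]", 5778.0), ("Vmag", 10.2),
                            ("logg", 4.43), ("dist [pc]", 120.5)])
# -------------------------------------------------------------------------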
#place text at bottom (only whitespace left...)\n edispace=0.1225\n edix=0.02\n ediy=0.98\n edicolor='red'\n #finding where EDI Vetter produced False Positive flags\n EDI_cols=list(EDI_results.columns.values)[2:] #ignoring ID,Sector columns\n for FP in range(len(EDI_cols)):\n if EDI_results[EDI_cols[FP]].item()==False:\n fig.text(edix+FP*edispace,ediy,EDI_cols[FP],fontsize=fs-5,color='black') \n if EDI_results[EDI_cols[FP]].item()==True:\n fig.text(edix+FP*edispace,ediy,EDI_cols[FP],fontsize=fs-5,color=edicolor) \n\n\ndef TLS_Report(ID,Sector,cadence,path,keep_FITS=False,keep_imagedata=True, for_injections=False):\n ###\n # making a 1 page validation report summarizing the overall transit search \n ###\n if for_injections==False:\n Path=path+'Sector_'+str(Sector)+'/'\n if for_injections==True:\n Path=path\n #\n if cadence=='long':\n saveReportpath = Path+'FFI_TLS_Report/'\n savelcpath= Path+'FFI_PLD_LCs/'\n downloadpath = Path+'cache/'\n if cadence=='short': \n saveReportpath = Path+'TPF_TLS_Report/'\n savelcpath= Path+'TPF_PLD_LCs/'\n downloadpath = Path+'cache/'\n ### \n #creating directory if it already doesn't exist\n if os.path.exists(saveReportpath)==True:\n pass\n else:\n os.makedirs(saveReportpath)\n ###\n #in case it was deleted by a previous run:\n if os.path.exists(downloadpath)==True:\n pass\n else:\n os.makedirs(downloadpath)\n ###\n ###\n #files needed to compile results\n # light curves and centroids\n LC_df = pd.read_csv(savelcpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_final_LC.txt')\n ###\n # TLS results\n TLS_df = pd.read_csv(saveReportpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_TLS.txt')\n TLSmodel_df = pd.read_csv(saveReportpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_TLS_model.txt')\n TLSbestfit_df = pd.read_csv(saveReportpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_TLS_bestfit.txt')\n TLSTCs_df = pd.read_csv(saveReportpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_TLS_TCs.txt')\n TLStxt_array=list(TLSbestfit_df.columns.values)\n ###\n # EDI-Vetter Results for False Positive Flags\n EDI_results = pd.read_csv(saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_EDI_results.txt\",index_col=0)\n ###\n #reading in a lot of dataframes can take up RAM, this can help clear it with gc.collect()\n import gc \n #reading in a lot of dataframes can take up RAM, this can help clear it with gc.collect()\n ###\n import time as clock\n start=clock.time()\n ###\n #fontsize\n fs=12\n ###\n fig = plt.figure(figsize=(12,10))\n gs1 = gridspec.GridSpec(6, 3)#4,2\n #3x3 grid, (left,middle,right) helps makes sense of placement. 
gs1[height, width]\n left=0\n middle=1\n right=2\n ### \n #Odd/Even plot\n ax0 = fig.add_subplot(gs1[0:2,left])\n plot_odd_even_transits(LC_df, TLSbestfit_df, TLSTCs_df, TLSmodel_df, ax0,fig)\n ### \n #Lightcurve\n ax1 = fig.add_subplot(gs1[0, middle:])\n ### \n #Power Spectrum\n ax2 = fig.add_subplot(gs1[1, middle:])\n plot_power_spectra(TLS_df,TLSbestfit_df, ax2)\n ###\n #Phasefolded Light Curves\n ax3 = fig.add_subplot(gs1[2, middle])\n ax4 = fig.add_subplot(gs1[2, right])\n ax5 = fig.add_subplot(gs1[3, middle])\n ax6 = fig.add_subplot(gs1[3, right])\n plot_phasefold_LCs(ID,Sector,LC_df,TLS_df,TLSbestfit_df,TLSTCs_df,TLSmodel_df, ax1,ax3,ax4,ax5,ax6)\n ### \n # TESS Image Cutout\n ax7_placement=gs1[2:4,left]\n plot_image(ID,Sector,cadence,path,ax7_placement,fig,fs,for_injections=for_injections)\n ### \n #Centroid Motion plot\n ax8 = fig.add_subplot(gs1[4:6,left])\n plot_centroids_in_phase(LC_df,TLSbestfit_df,ax8)\n ###\n # DSS Image\n ax9 = fig.add_subplot(gs1[4:6, middle])\n plot_dss_orientation(ax9,ID,downloadpath,cutoutsize=11)\n ### \n #plot text from stellar and planet parameters\n plot_text(ID,Sector,TLSbestfit_df,TLSTCs_df,EDI_results,fig)\n ###\n gs1.update(wspace=0.0, hspace=0.0)\n gs1.tight_layout(fig)\n fig.savefig(saveReportpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_TLSReport.png',bbox_inches='tight')\n# fig.show()\n plt.close()\n ###\n end=clock.time()\n\n runtime=end-start\n\n # clear garbage collection in RAM\n gc.collect()\n\n if runtime > 60:\n print('report runtime: '+str(runtime/60)+' minutes')\n if runtime < 60:\n print('report runtime: '+str(runtime)+' seconds') \n \n ###\n ### Last Step: Clear image data (FITS and PKL files) to save space\n ### (optional)\n ### \n #delete image data(aperture/background masks, cutout images)\n if keep_imagedata==False:\n pixmask_filename=\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_pix_mask\"\n bkgmask_filename=\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_bkg_mask\"\n image_filename=\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_image_fluxes\" \n os.system(\"rm \" + savelcpath+pixmask_filename+'.pkl') \n os.system(\"rm \" + savelcpath+bkgmask_filename+'.pkl') \n os.system(\"rm \" + savelcpath+image_filename+'.pkl') \n if keep_FITS==False:\n # deleting FITS files (no longer need them for light curve processing\n # can always download again)\n os.system(\"rm -r \" + downloadpath) #delete cache path \n \ndef Transit_Pipeline(threshold, ID,Sector,cadence,input_LC,N_transits,minP,oversampling_factor,duration_grid_step,path,keep_FITS=True,keep_imagedata=True,for_injections=False):\n \n ### first do BLS\n print('')\n print('doing BLS search')\n PowerSpectrum_df, TransitModel_df, TransitParams_df = TransitSearch('BLS',ID,Sector,cadence,input_LC,N_transits,minP,oversampling_factor,duration_grid_step,path,for_injections)\n ###\n if isinstance(PowerSpectrum_df, type(None)):\n print('problem with infinite/nan values inPower Spectrum')\n return None, None, None, None, None, None\n #make BLS Plot\n Transit_plot(ID,Sector,cadence,'BLS',input_LC, PowerSpectrum_df,TransitModel_df, TransitParams_df, path, for_injections)\n ###\n ### check if SDE is above threshold\n if (np.nanmax(np.array(PowerSpectrum_df['BLS SDE']))) > threshold:\n print('')\n print('BLS max peak > threshold!')\n print('')\n PowerSpectrum_df2, TransitModel_df2, TransitParams_df2 = TransitSearch('TLS',ID,Sector,cadence,input_LC,N_transits,minP,oversampling_factor,duration_grid_step,path, for_injections)\n if isinstance(PowerSpectrum_df2, type(None)):\n print('problem with 
infinite/nan values inPower Spectrum')\n return None, None, None, None, None, None \n else:\n ###\n #make TLS Plot\n Transit_plot(ID,Sector,cadence,'TLS',input_LC, PowerSpectrum_df2,TransitModel_df2, TransitParams_df2, path, for_injections)\n ###\n # make TLS Report\n TLS_Report(ID,Sector,cadence,path,keep_FITS=keep_FITS,keep_imagedata=keep_imagedata)\n \n \n return PowerSpectrum_df, TransitModel_df, TransitParams_df, PowerSpectrum_df2, TransitModel_df2, TransitParams_df2\n else:\n print('BLS signal < threshold')\n return PowerSpectrum_df, TransitModel_df, TransitParams_df, None,None,None\n \n \n##########################################\n##########################################\n####### TRANSIT SEARCHING FUNCTIONS ######\n##########################################\n##########################################\n\n\n\n\n##########################################\n##########################################\n####### TRANSIT INJECTION FUNCTIONS ######\n##########################################\n##########################################\n\ndef Make_dirs_injection(path,Sector,cadence,Period,R_planet_RE):\n import os\n #Step 0: Creating directories to save figures and data\n path=path+'Sector_'+str(Sector)+'/'\n path = path+'Period_'+str(np.round(Period,2))+'_RP_'+str(np.round(R_planet_RE,2))+'/'\n \n savefigpath1 = path+'FFI_PLD_plots/'\n savelcpath1 = path+'FFI_PLD_LCs/'\n savefigpath2 = path+'TPF_PLD_plots/'\n savelcpath2 = path+'TPF_PLD_LCs/' \n downloadpath = path+'cache/'\n ###\n if cadence=='long':\n savefigpath=savefigpath1\n savelcpath=savelcpath1\n downloadpath=downloadpath\n if os.path.exists(savefigpath1)==True:\n pass\n else: \n os.makedirs(savefigpath1)\n if os.path.exists(savelcpath1)==True:\n pass\n else:\n os.makedirs(savelcpath1) \n if os.path.exists(downloadpath)==True:\n pass\n else: \n os.makedirs(downloadpath) \n if cadence=='short': \n savefigpath=savefigpath2\n savelcpath=savelcpath2\n downloadpath=downloadpath\n if os.path.exists(savefigpath2)==True:\n pass\n else: \n os.makedirs(savefigpath2)\n if os.path.exists(savelcpath2)==True:\n pass\n else:\n os.makedirs(savelcpath2)\n if os.path.exists(downloadpath)==True:\n pass\n else: \n os.makedirs(downloadpath) \n ### \n return path, savefigpath, savelcpath, downloadpath\n\n\ndef transit_injection(input_LC, Period, T0, R_planet_RE, ID, Sector):\n import pandas as pd \n \n T=np.array(input_LC['Time'])\n F=np.array(input_LC['SAP Flux'])\n E=np.array(input_LC['SAP Error'])\n #calculate the cadence (exposure time) in TESS data\n texp=np.nanmedian(np.diff(T))\n print('cad',texp)\n #looking up Stellar parameters from the TIC on MAST\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n qld_a=qld[0]\n qld_b=qld[1]\n\n #physical constants\n RS = 6.955*10.0**10.0 #cm, solar radius\n MS = 1.989*10.0**33.0 #grams, solar mass\n RE = 6.378*10.0**8.0 #cm, earth radius\n \n # arbitrarily choosing for transits to start at 20th data point\n # for 30 minute cadence, this is about 10 hrs after observations start\n \n time_start = T0 #remember Python uses zero indexing (0=first point) \n \n SMA, SMA_cm = SMA_AU_from_Period_to_stellar(Period,R_star,M_star)\n \n \n #for transit injection\n import batman\n\n # Using Batman to inject transits into background subtracted and normalized Flux\n # Note: At this point, Flux is not yet detrended (Simple Aperture Photometry)\n ma = batman.TransitParams()\n ma.t0 = time_start # time of inferior conjunction; first transit is X days after start\n ma.per = Period # 
orbital period\n ma.rp = (R_planet_RE*RE)/(R_star*RS) #in units of stellar radii\n ma.a = SMA # semi-major axis (in units of stellar radii)\n \n # the following 3 parameters are assumed for a perfect,\n # across the star's face, transit\n ma.inc = 90 # orbital inclination (in degrees)\n ma.ecc = 0 # eccentricity\n ma.w = 90 # longitude of periastron (in degrees)\n\n ma.u = [qld_a, qld_b] # limb darkening coefficients\n ma.limb_dark = \"quadratic\" # limb darkening model\n# print(' ')\n# print('injected params:')\n# print(ma.rp,'planet radius in stellar radii')\n# print(R_planet_RE,'planet radius in Earth radii')\n# print(ma.a,'SMA in stellar radii')\n# # print(SMA,'SMA in AU')\n# print(ma.t0,'transit starting time (TESS JD)')\n# print(ma.per, 'orbital period (days)')\n# print('')\n \n t = np.linspace(np.min(T),np.max(T),len(T))\n \n# m = batman.TransitModel(ma, t,supersample_factor = 7, exp_time=texp) # initializes model\n m = batman.TransitModel(ma, T, supersample_factor = 7, exp_time=texp) # initializes model \n synthetic_signal = m.light_curve(ma) # calculates light curve\n\n injectedflux = F+synthetic_signal-1 #adding 1 to make baseline = 1\n\n inj_LC = pd.DataFrame({'Time':T,'SAP Flux':injectedflux,'SAP Error':E, 'Injected Model':synthetic_signal})\n \n inj_params= pd.DataFrame({'Period':ma.per,'T0':ma.t0,'Planet Radius [RS]':ma.rp,'Injected Radius':R_planet_RE,'SMA [RS]':ma.a},index=[0])\n \n return inj_LC, inj_params\n\n\n\ndef full_pipeline_injection(ID,cutoutsize,Sector,minimum_photon_counts,threshold,pld_order,n_pca_terms, Nsigma_low, Nsigma_high, remove_outliers, before_after_in_minutes, path, cadence, verbose, Period, R_planet_RE,T0, keep_FITS=True, keep_imagedata=True, window_size_in_days=None,use_SPOC_aperture='no'): \n from transitleastsquares import catalog_info \n import sys\n ###\n #first, check if target has known stellar radius and/or mass:\n from transitleastsquares import catalog_info \n try:\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:\n clock.sleep(5) #pause 5 seconds then try again\n qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)\n if np.isfinite(R_star)==False or np.isfinite(M_star)==False:\n print('TIC '+str(ID)+' has no known Stellar Mass or Radius in TIC')\n return\n else:\n ###\n ###\n ###\n print('TIC '+str(ID)+' Sector '+str(Sector))\n #Step 0: Creating directories to save figures and data\n if verbose==True:\n print('Step 0: Making Directories')\n print(' ')\n path, savefigpath, savelcpath,downloadpath = Make_dirs_injection(path,Sector,cadence,Period,R_planet_RE)\n ###\n ###\n ###\n #Step 1: Obtaining HDU for FFI/TPF\n if verbose==True:\n print('Step 1: Obtaining HDU from FFI/TPF')\n print(' ')\n try:\n hdu,CCD,Camera,quality_mask,reference_pixel = gethdu(ID,Sector,cutoutsize,cadence,minimum_photon_counts,verbose,downloadpath)\n ###\n ### \n print(' ')\n if hdu==None:\n #print('No Image data for TIC '+str(ID)+' in Sector '+ str(Sector)+'!!!')\n sys.exit('No Image data for TIC '+str(ID)+' in Sector '+ str(Sector)+'!!!') \n except AttributeError as AE:\n print(AE)\n sys.exit('No Image data for TIC '+str(ID)+' in Sector '+ str(Sector)+'!!!') \n ###\n ###\n ###\n ###\n if verbose==True:\n print('Step 2: Performing Background Subtraction and Simple Aperture Photometry')\n print(' ')\n try:\n bkg_mask, pix_mask ,flux, median_image, SAP_LC, flux_contamination_ratio = 
SAP(ID,Sector,cutoutsize,hdu,quality_mask,threshold,cadence,reference_pixel,verbose,savelcpath,use_SPOC_aperture='no')\n ###\n ###\n ###\n except TypeError as TE:\n print(TE)\n print('Unable to create aperture mask. Skipping this target...')\n return \n ###\n ###\n ###\n if len(SAP_LC['SAP Error'])==0:\n print(' ')\n print('Uneven array lengths, FFI likely on edge of detector/partially shown')\n return\n ###\n ### Transit Injection begins\n mdumps,t_0,t_1 = momentumdump_check(Sector)\n if T0==None:\n #random pt between beginning of sector and end of 1st orbit\n T0 = np.random.uniform(low=t_0,high=t_1) \n else: \n T0=t_0+1 #1 day after start of sector, fixed first transit time\n input_LC = SAP_LC\n inj_LC, inj_params = transit_injection(input_LC,Period, T0, R_planet_RE, ID, Sector)\n #save raw injected LC\n inj_LC.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_TransitInjected_RAWLC.txt\",index=False)\n #save params\n inj_params.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_TransitInjected_param.txt\",\\\n index=False) \n SAP_LC['SAP Flux'] = inj_LC['SAP Flux']\n SAP_LC['SAP Error']= inj_LC['SAP Error'] \n print('Injecting Transit: P= '+str(Period)+'d; RP= '+str(R_planet_RE)+' T0: '+str(T0))\n ### Transit Injection finished\n ###\n if verbose==True:\n print('Step 3: Removing Momentum dumps and regions of high jitter / Earth-Moon glare')\n print(' ')\n mask_mdump, mdumps,t_0,t_1, flux, RAWLC_df, clippedRAWLC_df = Applying_Mdump_removal(ID,Sector,Camera,CCD,before_after_in_minutes,SAP_LC,flux,savelcpath,verbose)\n ###\n ###\n ### saving pixel and background masks and image fluxes\n saveNDarr(pix_mask,savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_pix_mask\")\n saveNDarr(bkg_mask,savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_bkg_mask\")\n saveNDarr(flux,savelcpath,\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_image_fluxes\") \n ###\n ### calculating centroid positions throughout images and resaving to file\n cxs,cys = check_centroids(ID,Sector,cutoutsize,cadence,reference_pixel,savelcpath)\n time = np.array(clippedRAWLC_df['Time'])\n sap_flux=np.array(clippedRAWLC_df['SAP Flux'])\n sap_error=np.array(clippedRAWLC_df['SAP Error'])\n bkg_flux=np.array(clippedRAWLC_df['Background Flux'])\n clippedRAWLC_df = pd.DataFrame({\"Time\":time, \"SAP Flux\": sap_flux, \"SAP Error\":sap_error,\"Background Flux\":bkg_flux, \"Centroid X Positions\":cxs,\"Centroid Y Positions\":cys})\n clippedRAWLC_df.to_csv(savelcpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_RAW_LC_systematics_removed.txt\",index=False)\n ###\n if verbose==True:\n print('Step 4: Performing Pixel Level Decorrelation modeling')\n print(' ')\n ###\n ## work on making this flexible to take either PLD or SAP\n input_LC = pd.DataFrame({'Time':np.array(clippedRAWLC_df['Time']),\\\n 'Flux':np.array(clippedRAWLC_df['SAP Flux']),\\\n 'Error':np.array(clippedRAWLC_df['SAP Error']),\\\n \"Centroid X Positions\":np.array(clippedRAWLC_df[\"Centroid X Positions\"]),\\\n \"Centroid Y Positions\":np.array(clippedRAWLC_df[\"Centroid Y Positions\"])})\n\n PLD_LC = PLD_model(ID,Sector,flux,pix_mask,input_LC,savelcpath,pld_order=pld_order, n_pca_terms=n_pca_terms)\n ###\n ###\n ###\n if verbose==True:\n print('Step 5: Applying smoothing filter')\n print(' ') \n print('len check for step 5:')\n print('PLD T',len(np.array(PLD_LC['Time'])),'PLD F',len(np.array(PLD_LC['PLD Flux'])),'PLD E',len(np.array(PLD_LC['PLD Error'])))\n ## work on making this flexible to take either DET or SAP\n input_LC2 = 
pd.DataFrame({'Time':np.array(PLD_LC['Time']),\\\n 'Flux':np.array(PLD_LC['PLD Flux']),\\\n 'Error':np.array(PLD_LC['PLD Error']),\\\n 'Model':np.array(PLD_LC['PLD Model']),\\\n 'SAP Flux':np.array(input_LC['Flux']),\\\n 'SAP Error':np.array(input_LC['Error']),\\\n \"Centroid X Positions\":np.array(PLD_LC[\"Centroid X Positions\"]),\\\n \"Centroid Y Positions\":np.array(PLD_LC[\"Centroid Y Positions\"])})\n Det_LC, nanmask = BWMC_auto(ID,Sector,input_LC2,savelcpath) \n ###\n ### ensure PLD outputs and Detrended outputs have same length using nanmask output\n print('2nd len check for step 5: ')\n # print('T: ',len(Det_LC['Time']),'SAP F: ',len(PLD_LC['SAP Flux']), 'SAP E: ',len(PLD_LC['SAP Error']) ,\\\n # ' PLD F: ', len(PLD_LC['PLD Flux']),' PLD E: ', len(PLD_LC['PLD Error']),\\\n # ' Det F: ',len(Det_LC['Detrended Flux']), 'Det E: ',len(Det_LC['Detrended Error'])) \n # PLD_LC = pd.DataFrame({'Time':np.array(Det_LC['Time']),'SAP Flux':(PLD_LC['SAP Flux'])[nanmask],\\\n # 'SAP Error':(PLD_LC['SAP Error'])[nanmask],'PLD Flux':np.array(PLD_LC['PLD Flux'])[nanmask],\\\n # 'PLD Error':np.array(PLD_LC['PLD Error'])[nanmask],\\\n # 'PLD Model':np.array(PLD_LC['PLD Model'])[nanmask],\\\n # 'Centroid X Positions':np.array(PLD_LC['Centroid X Positions']),\\\n # 'Centroid Y Positions':np.array(PLD_LC['Centroid Y Positions'])})\n print('T: ',len(Det_LC['Time']),'SAP F: ',len(Det_LC['SAP Flux']), 'SAP E: ',len(Det_LC['SAP Error']),' PLD F: ', len(Det_LC['PLD Flux']),' PLD E: ', len(Det_LC['PLD Error']),' PLD M: ', len(Det_LC['PLD Model']),' Det F: ',len(Det_LC['Detrended Flux']), 'Det E: ',len(Det_LC['Detrended Error']),' Det M: ',len(Det_LC['Fitted Trend'])) \n ###\n ###\n ###\n if verbose==True:\n print('Step 6: Applying Outlier Removal (if set to \"yes\")')\n print(' ')\n ###\n #this needs to be the MOST flexible part to deal with combos of PLD, DET and SAP\n print('len check for step 6:')\n print('T: ',len(Det_LC['Time']),'SAP F: ',len(Det_LC['SAP Flux']), 'SAP E: ',len(Det_LC['SAP Error']),' PLD F: ', len(Det_LC['PLD Flux']),' PLD E: ', len(Det_LC['PLD Error']),' PLD M: ', len(Det_LC['PLD Model']),' Det F: ',len(Det_LC['Detrended Flux']), 'Det E: ',len(Det_LC['Detrended Error']),' Det M: ',len(Det_LC['Fitted Trend'])) \n input_LC3 = pd.DataFrame({'Time':np.array(Det_LC['Time']),\\\n 'SAP Flux':np.array(Det_LC['SAP Flux']),\\\n 'SAP Error':np.array(Det_LC['SAP Error']),\\\n 'Detrended Flux':np.array(Det_LC['Detrended Flux']),\\\n 'Detrended Error':np.array(Det_LC['Detrended Error']),\\\n 'Fitted Trend':np.array(Det_LC['Fitted Trend']),\\\n 'PLD Flux':np.array(Det_LC['PLD Flux']),\\\n 'PLD Error':np.array(Det_LC['PLD Error']),\\\n 'PLD Model':np.array(Det_LC['PLD Model']),\\\n \"Centroid X Positions\":np.array(Det_LC[\"Centroid X Positions\"]),\\\n \"Centroid Y Positions\":np.array(Det_LC[\"Centroid Y Positions\"])})\n ###\n ###\n LC_df, good_ind_DF, bad_ind_DF, preclipLC_df = outlier_removal(ID,Sector,input_LC3, remove_outliers, Nsigma_low,Nsigma_high,savelcpath,verbose,window_size_in_days=window_size_in_days) \n ###\n ###\n ###\n ###\n ###\n ###\n if verbose==True:\n print('Step 7: Plotting and Saving FFI and selected apertures')\n print(' ')\n plot_it_all_up(ID,Sector,cutoutsize,cadence,Nsigma_low,Nsigma_high,\\\n hdu,median_image,pix_mask,bkg_mask, RAWLC_df, \\\n clippedRAWLC_df, LC_df, good_ind_DF, bad_ind_DF, preclipLC_df, \\\n magnitude_limit=18,dot_scale=20,path=path,downloadpath=downloadpath)\n ###\n ###\n if keep_FITS==False:\n # deleting FITS files (no longer need them for light curve 
processing\n # can always download again)\n os.system(\"rm -r \" + downloadpath) #delete cache path\n ###\n ###\n print('FINAL LENGTHS :', ' T', len(LC_df['Time']),' Det F',len(LC_df['Detrended Flux']), ' trend',len(LC_df['Fitted Trend']), 'PLD model',len(LC_df['PLD Model']),' PLD F',len(LC_df['PLD Flux']),' SAP E', len(LC_df['SAP Error']))\n\n \n\n\ndef TLS_Report_injection(ID,Sector,cadence,path,keep_FITS=False,keep_imagedata=True, for_injections=True):\n ### ONLY DIFFERENCE IS TURNING OFF DSS PLOT\n ###\n # making a 1 page validation report summarizing the overall transit search \n ###\n if for_injections==False:\n Path=path+'Sector_'+str(Sector)+'/'\n if for_injections==True:\n Path=path\n if cadence=='long':\n saveReportpath = Path+'FFI_TLS_Report/'\n savelcpath= Path+'FFI_PLD_LCs/'\n downloadpath = Path+'cache/'\n if cadence=='short': \n saveReportpath = Path+'TPF_TLS_Report/'\n savelcpath= Path+'TPF_PLD_LCs/'\n downloadpath = Path+'cache/'\n ### \n #creating directory if it already doesn't exist\n if os.path.exists(saveReportpath)==True:\n pass\n else:\n os.makedirs(saveReportpath)\n ###\n #in case it was deleted by a previous run:\n if os.path.exists(downloadpath)==True:\n pass\n else:\n os.makedirs(downloadpath)\n ###\n ###\n #files needed to compile results\n # light curves and centroids\n LC_df = pd.read_csv(savelcpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_final_LC.txt')\n ###\n # TLS results\n TLS_df = pd.read_csv(saveReportpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_TLS.txt')\n TLSmodel_df = pd.read_csv(saveReportpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_TLS_model.txt')\n TLSbestfit_df = pd.read_csv(saveReportpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_TLS_bestfit.txt')\n TLSTCs_df = pd.read_csv(saveReportpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_TLS_TCs.txt')\n TLStxt_array=list(TLSbestfit_df.columns.values)\n ###\n # EDI-Vetter Results for False Positive Flags\n EDI_results = pd.read_csv(saveReportpath+\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_EDI_results.txt\",index_col=0)\n ###\n #reading in a lot of dataframes can take up RAM, this can help clear it with gc.collect()\n import gc \n #reading in a lot of dataframes can take up RAM, this can help clear it with gc.collect()\n ###\n import time as clock\n start=clock.time()\n ###\n #fontsize\n fs=12\n ###\n fig = plt.figure(figsize=(12,10))\n gs1 = gridspec.GridSpec(6, 3)#4,2\n #3x3 grid, (left,middle,right) helps makes sense of placement. 
gs1[height, width]\n left=0\n middle=1\n right=2\n ### \n #Odd/Even plot\n ax0 = fig.add_subplot(gs1[0:2,left])\n plot_odd_even_transits(LC_df, TLSbestfit_df, TLSTCs_df, TLSmodel_df, ax0,fig)\n ### \n #Lightcurve\n ax1 = fig.add_subplot(gs1[0, middle:])\n ### \n #Power Spectrum\n ax2 = fig.add_subplot(gs1[1, middle:])\n plot_power_spectra(TLS_df,TLSbestfit_df, ax2)\n ###\n #Phasefolded Light Curves\n ax3 = fig.add_subplot(gs1[2, middle])\n ax4 = fig.add_subplot(gs1[2, right])\n ax5 = fig.add_subplot(gs1[3, middle])\n ax6 = fig.add_subplot(gs1[3, right])\n plot_phasefold_LCs(ID,Sector,LC_df,TLS_df,TLSbestfit_df,TLSTCs_df,TLSmodel_df, ax1,ax3,ax4,ax5,ax6)\n ### \n # TESS Image Cutout\n ax7_placement=gs1[2:4,left]\n plot_image(ID,Sector,cadence,path,ax7_placement,fig,fs,for_injections=for_injections)\n ### \n #Centroid Motion plot\n ax8 = fig.add_subplot(gs1[4:6,left])\n plot_centroids_in_phase(LC_df,TLSbestfit_df,ax8)\n ###\n # DSS Image\n ax9 = fig.add_subplot(gs1[4:6, middle])\n plot_dss_orientation(ax9,ID,downloadpath,cutoutsize=11,do_DSS_plot=False) #<---THE ONLY DIFFERENCE\n ### \n #plot text from stellar and planet parameters\n plot_text(ID,Sector,TLSbestfit_df,TLSTCs_df,EDI_results,fig)\n ###\n gs1.update(wspace=0.0, hspace=0.0)\n gs1.tight_layout(fig)\n fig.savefig(saveReportpath+'TIC_'+str(ID)+'_Sector_'+str(Sector)+'_TLSReport.png',bbox_inches='tight')\n# fig.show()\n plt.close()\n ###\n end=clock.time()\n\n runtime=end-start\n\n # clear garbage collection in RAM\n gc.collect()\n\n if runtime > 60:\n print('report runtime: '+str(runtime/60)+' minutes')\n if runtime < 60:\n print('report runtime: '+str(runtime)+' seconds') \n \n ###\n ### Last Step: Clear image data (FITS and PKL files) to save space\n ### (optional)\n ### \n #delete image data(aperture/background masks, cutout images)\n if keep_imagedata==False:\n pixmask_filename=\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_pix_mask\"\n bkgmask_filename=\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_bkg_mask\"\n image_filename=\"TIC_\"+str(ID)+\"_Sector_\"+str(Sector)+\"_image_fluxes\" \n os.system(\"rm \" + savelcpath+pixmask_filename+'.pkl') \n os.system(\"rm \" + savelcpath+bkgmask_filename+'.pkl') \n os.system(\"rm \" + savelcpath+image_filename+'.pkl') \n if keep_FITS==False:\n # deleting FITS files (no longer need them for light curve processing\n # can always download again)\n os.system(\"rm -r \" + downloadpath) #delete cache path\n \n \ndef Transit_Pipeline_injection(threshold, ID,Sector,cadence,input_LC,N_transits,minP,oversampling_factor,duration_grid_step,path,keep_FITS=True,keep_imagedata=True, for_injections=True):\n \n ### first do BLS\n print('')\n print('doing BLS search')\n PowerSpectrum_df, TransitModel_df, TransitParams_df = TransitSearch('BLS',ID,Sector,cadence,input_LC,N_transits,minP,oversampling_factor,duration_grid_step,path, for_injections)\n ###\n if isinstance(PowerSpectrum_df, type(None)):\n print('problem with infinite/nan values inPower Spectrum')\n return None, None, None, None, None, None\n #make BLS Plot\n Transit_plot(ID,Sector,cadence,'BLS',input_LC, PowerSpectrum_df,TransitModel_df, TransitParams_df, path, for_injections)\n ###\n ### check if SDE is above threshold\n if (np.nanmax(np.array(PowerSpectrum_df['BLS SDE']))) > threshold:\n print('')\n print('BLS max peak > threshold!')\n print('')\n PowerSpectrum_df2, TransitModel_df2, TransitParams_df2 = TransitSearch('TLS',ID,Sector,cadence,input_LC,N_transits,minP,oversampling_factor,duration_grid_step,path, for_injections)\n if 
isinstance(PowerSpectrum_df2, type(None)):\n print('problem with infinite/nan values inPower Spectrum')\n return None, None, None, None, None, None \n else:\n ###\n #make TLS Plot\n Transit_plot(ID,Sector,cadence,'TLS',input_LC, PowerSpectrum_df2,TransitModel_df2, TransitParams_df2, path, for_injections)\n ###\n # make TLS Report\n TLS_Report_injection(ID,Sector,cadence,path,keep_FITS=keep_FITS,keep_imagedata=keep_imagedata, for_injections=for_injections)\n \n \n return PowerSpectrum_df, TransitModel_df, TransitParams_df, PowerSpectrum_df2, TransitModel_df2, TransitParams_df2\n else:\n print('BLS signal < threshold')\n return PowerSpectrum_df, TransitModel_df, TransitParams_df, None,None,None \n\n##########################################\n##########################################\n####### TRANSIT INJECTION FUNCTIONS ######\n##########################################\n##########################################\n\n\n\n\n\n#########################################################\n#########################################################\n################### Change Log ##########################\n#########################################################\n#########################################################\n\n# ## Change log for V13 (Now called NEMESIS V1.1) : April 22nd, 2021 (updating after lightkurve 2.0 came out)\n# ## - Updated gethdu to better handle 504 Gate TimeOuts from MAST. \n# ## Now uses a while loop until it downloads properly.\n\n# ## Change log for V12 (Now called NEMESIS V1.0) : January 8th, 2021 (After receiving Referee's report from AJ)\n# ## - Added flux contamination (deblending) correction function to pipeline: \n# ## Will query TIC for nearby stars within 63 arcseconds and find flux contam ratio\n# ## Then it will subtract ratio from normalized SAP and then renormalized before \n# ## being passed on to rest of pipeline.\n# ## - Added aesthetic changes to clean up light curve summary figures\n# ## (used to be \"*_PLD.png\", is now \"*_LC_summary.png\"), TLS validation \n# ## reports, and _________\n# ## - Added Binning function based on inverse variance weighting\n# ## (Also used in AstroImageJ, courtesty of Dr. Karen Collins).\n# ## This isn't used anywhere but is very useful! \n# ## - Added injection pipeline which used to be a separate script.\n\n\n# ## Change log for V11: August 17th, 2020\n# ## - Added BLS functionality, same inputs and outputs as TLS (except FAP will be NaN)\n# ## - Added Transit_Pipeline that does BLS first and if BLS peak > SDE threshold, \n# ## it will then run TLS. Only TLS gets full reports. May add BLS reports in future...\n# ## - After some testing, decided to remove positive consecutive outliers. 
\n# ## May affect transit searches.\n# ## - Added a function to center FFI cutouts and rewrite WCS headers based on \n# ## centered pixel coordinates.\n# ## - Added function to plot either TESS or GAIA sources for nearby stars.\n# ## - Condensed FFI/TPF plotting instances to use same command: plot_cutouts\n# ## - Went back to original pipeline order: 1)Get Images, 2) SAP, 3) PLD, \n# ## 4) Smoothing, 5) Outlier Removal, 6) save light curves and plots\n\n\n# ## Change log for V10: June 30th,2020\n# ## - Revisted outlier removal and added option for a sliding outlier remover\n# ## that keeps consecutive data points (like transits) which requires the \n# ## inputs for method (\"sliding\"), window, number of standard deviations\n# ## and whether to use global noise of full light curve or just noise within\n# ## sliding window (\"global\", \"local\")\n\n# ## Change log for V9: June 4th, 2020\n# ## - Rewritten and condensed functions for easier understanding of each \n# ## step in pipeline within the \"PLD_FFI\" function. \n# ## - Modified outlier removal to include data points below/above a user\n# ## defined threshold that are consecutive (like transits/flares). \n\n# ## Change log for V8: May 12, 2020 \n# ## - Added features to plot other TESS stars in FFI based on TIC RA/DEC, \n# ## and now shows sky background\n# ## - Changed order of operations in light curve extraction:\n# ## - Steps are now, 1) get FFI 2) make pixel mask 3) do background subtraction\n# ## 4) Remove momentum dumps 5) Detrend 6) PLD 7) save lightcurves 8) plot\n# ## - Tweaked momentumdump removal to better clip out bad regions of data\n# ## - momentumdump_removal now returns a boolean mask instead of time,flux,error\n# ## arrays\n# ## - Light curves before removal of systematics/detrending are now saved as \n# ## \"TIC_IDNum_Sector_SectorNum_RAW_LC.txt\" in PLD_LCs directory\n# ## - Provided user option to turn on/off outlier removal with \"yes\"/\"no\" input\n# ## - Tweaked outlier removal to use a sliding sigma clipper using a 3 hr window\n# ## and removing data points within 3 std above and 7 std below the median flux \n# ## - Provided user option to select window of data to remove around momentum dumps\n# ## \"before_after_in_minutes\"\n# ## - Added \"smooth_window\" as input for PLD so user can change window size for\n# ## smoothing the flux in units of hours. \n# ## - Now instead of having Jupyter Notebooks with definitions, I am now using\n# ## separate Python scripts to import custom functions. I should look into \n# ## using Classes for definitions...\n\n\n\n# ## Change log for V7: \n# ## Added capability to use more than 2nd order PLD. Can do any order user desires.\n# ## Added sigma_clip for outlier removal (works better). Uses higher sigma for points \n# ## below than above to avoid truncating potential transit depths.\n\n# ## Change log for V6: \n# ## - Can now be used for either 30 minute FFI file or 2 minute TPF file structures. \n# ## PLD_FFI function now requires cadence input of \"long\" or \"short\", \n# ## similar to lightkurve.\n# ## - To obtain TPFs, we still use lightkurve's search_targetpixelfile function. \n# ## It's just easier than coming up with a query from scratch. \n# ## - Will create FFI and TPF directories for light curve files and plots depending\n# ## on user selected cadence mode.\n\n# ## Change log for V5: \n# ## -Modified aperture selection for target stars. Before, only the brightest pixels in the FFI cutout were used. 
\n# ## This meant that neighboring stars that were brighter or similarly bright as the target star were being included \n# ## in the pixel mask, creating a blended light curve and adding additional steps to manual vetting.\n# ## -New aperture selection requires the RA and DEC of the target star. This is done with the \"RaandDec_to_XandY\" \n# ## function which converts RA and DEC to pixel X,Y coordinates. With the X,Y pixel coordinates, the new aperture \n# ## mask function titled \"thresholdmask\" uses a sigma threshold level to determine the closest and brightest pixels \n# ## to the target star.\n\n# ## Change log for V4:\n# ## - Modified outlier removal to make Nsigma a variable instead of 7 (still set to 7, may change in future versions)\n# ## - Fixed calculated errors. Previous versions had HUGE errors (+/- 3 flux units). Currently uniform errors based \n# ## on PLD detrended flux values ~ +/- 0.005 (more reasonable).\n# ## - Working Directory path is now an input to PLD_FFI so pathnames aren't hardcoded. Just run this script from \n# ## where ever you want to place files.\n# ## - Added more rigorous ID matching when using Astroquery to avoid accidentally matching to nearby bright stars. \n# ## Also added a 21 arcsec radial cone search (TESS' pixel scale) instead of the 30 arcsec angular size used before.\n# ## - Edited background pixel mask to ignore the brightest pixel(s) in the FFI cutout, variable name: \"\n# ## except_these_pixels\"\n# ## - Updated PLD portion to accept situations where there are more than one solution for solving a*W=b. \n# ## If more than one solution, Numpy's least squares solver will be used and first solution is selected.\n\n\n# ## Change log for V3:\n# ## - modified order of systematic removal, detrending and outlier removal \n# ## - Outlier removal now does sweeps looking applying an iterative 5-sigma cut and smoothing procedure followed \n# ## by a final cut which uses 7 times the rms of the detrended light curve.\n# ## - Modified background subtraction to ignore 4 brightest pixels in image cutout instead of before where we \n# ## selected 100 brightest pixels. 
For small cutout sizes (<10), this would select all pixels as the background.\n\n# \n# ## Change log for V2:\n# ## - tweaked plotting of FFI image to combine with PLD/SAP light curve comparison \n# ## - Added TLS (with momentum dump markers)\n# ## - Added Plotting script to show each TLS modeled event vertically separated\n# \n# ## Change log for V1:\n# ## - Introduced Pixel Level Decorrelation (explanation, directory creation, SAP, PLD, plotting, saving \n# ## light curves/images to directories)\n","sub_path":"nemesis/NEMESIS_pipeline.py","file_name":"NEMESIS_pipeline.py","file_ext":"py","file_size_in_byte":227186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"543602922","text":"import re\nimport numpy as np\n\n\nclass Sample:\n @staticmethod\n def extract_name(data: [str]) -> str:\n line = data[0]\n if '[' in line or ']' in line:\n raise ValueError(\"Can't extract name from:\", data)\n return line\n\n @staticmethod\n def extract_codevector(data: [str]) -> [[float]]:\n tmp = \"\"\n reading = False\n for id, line in enumerate(data):\n if '[' in line:\n tmp = line\n reading = True\n elif ']' in line and reading:\n tmp += line\n rawVector = re.sub(r'\\[|\\|\\n]', '', tmp)\n return np.fromstring(rawVector, np.float32, sep=' ').tolist()\n elif reading:\n tmp += line\n raise ValueError(\"Can't extract code vector from:\", data)\n\n @staticmethod\n def extract_label(data: [str]) -> int:\n for id, line in enumerate(data):\n if len(line) <= 2:\n label = int(line)\n if label == 0 or label == 1:\n return label\n raise ValueError(\"No label found in:\", data)\n\n def __init__(self, rawData: [str]):\n self.name = Sample.extract_name(rawData)\n self.label = Sample.extract_label(rawData)\n self.codeVector = Sample.extract_codevector(rawData)\n if not self.valid_sample():\n raise ValueError(\"Invalid input:\", rawData)\n\n def __init__(self, name, label, codevector):\n self.name = name\n self.label = int(label)\n self.codeVector = codevector\n if not self.valid_sample():\n raise ValueError(\"Invalid input:\", str(self))\n\n def valid_sample(self) -> bool:\n return len(self.codeVector) == 128 and (self.label == 0 or self.label == 1) and len(self.name) > 3\n\n def __str__(self):\n return '\\n'.join([self.name, str(self.label), np.array2string(np.asarray(self.codeVector))]) + '\\n'\n","sub_path":"JackTheLoggerNet/Sample.py","file_name":"Sample.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"245771167","text":"def pairInSortedRotated(arr, n, x):\r\n # Find the pivot element\r\n for i in range(0, n - 1):\r\n if (arr[i] > arr[i + 1]):\r\n break;\r\n\r\n # l is now index of smallest element\r\n l = (i + 1) % n\r\n # r is now index of largest element\r\n r = i\r\n\r\n # Keep moving either l\r\n # or r till they meet\r\n while (l != r):\r\n\r\n # If we find a pair with\r\n # sum x, we return True\r\n if (arr[l] + arr[r] == x):\r\n return True;\r\n\r\n # If current pair sum is less,\r\n # move to the higher sum\r\n if (arr[l] + arr[r] < x):\r\n l = (l + 1) % n;\r\n else:\r\n\r\n # Move to the lower sum side\r\n r = (n + r - 1) % n;\r\n\r\n return False;\r\n\r\n\r\n# Driver program to test above function\r\narr = [11, 15, 26, 38, 9, 10]\r\nsum = 16\r\nn = len(arr)\r\n\r\nif (pairInSortedRotated(arr, n, sum)):\r\n print(\"Array has two elements with sum 16\")\r\nelse:\r\n print(\"Array doesn't have two elements with sum 16 \")","sub_path":"Given a sorted and 
rotated array, find if there is a pair with a given sum.py","file_name":"Given a sorted and rotated array, find if there is a pair with a given sum.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"645901764","text":"coords = get_data(fname, 1, p_type, 'Coordinates')\r\nvelocities = get_data(fname, 1, p_type, 'Velocities')\r\n\r\nradius = ''# Distance from host center\r\n\r\n\r\n# dispersion1 = []\r\n# rad1 = []\r\n# n = 0\r\n# radii = 0.40\r\n# while n < 29 :\r\n\r\n# In general while loops are the slowest of the loop types in Python\r\n# Most of the time you can replace them with a for loop. One of the more\r\n# common times you can't is when you need a dynamic pointer to a list.\r\n# Here, we can use a for loop with a little preparation. \r\n\r\n# Since you are dividing particles into bins, we can define the edges ahead \r\n# of time and then iterate through the list starting at the second value. \r\n# Additionally, we know the number of bins, and therefore, how large the\r\n# dispersion list needs to be (number of bin edges - 1).\r\n\r\nRMAX = 300\r\nradial_bins = np.arange(0, RMAX, 1)\r\ndispersion = np.zeros(radial_bins.size - 1)\r\n\r\nfor i in range(1, len(radial_bins)): # Start at i = 1 since we're making shells\r\n # index = radius > radii\r\n # master_velocity = velocities[index]\r\n\r\n # index1 = radius < radii + increment\r\n # master_velocity = velocities[index1]\r\n \r\n # I like your idea here of slicing the list but it can be done together.\r\n # We'll use two boolean masks to do this and combine them with an '&'.\r\n # Also, I think you have a typo, because you overwrite master_velocity\r\n # with the use of index1.\r\n\r\n mask = (radius >= radial_bins[i-1]) & (radius < radial_bins[i])\r\n\r\n # v_xav = np.sum((master_velocity[:,0])) / ( len(master_velocity))\r\n # v_yav = np.sum((master_velocity[:,1])) / ( len(master_velocity))\r\n # v_zav = np.sum((master_velocity[:,2])) / ( len(master_velocity))\r\n # v_av = [v_xav,v_yav,v_zav ]\r\n\r\n # difference = np.zeros(len(master_velocity))\r\n\r\n # average_v = np.repeat(average_velocity,len(master_velocity) )\r\n # a = master_velocity\r\n # b = average_v\r\n\r\n # difference = [a_i - b_i for a_i, b_i in zip(a, b)]\r\n \r\n # We can take advantage of NumPy's matrix/vector math implementation\r\n # to do all of this in a few steps. Also, NumPy arrays have a set\r\n # of convenience methods allowing you to do common statistical \r\n # calculations on them (.sum(), .mean(), .std(), .var() and more)\r\n\r\n v_avg = velocities[mask].mean() \r\n difference = velocities[mask] - v_avg \r\n\r\n # sig = np.zeros(len(difference))\r\n # for j in range (0, len(difference)):\r\n # sigma = np.square(difference[j])\r\n # sig[j] = np.sum(sigma)\r\n\r\n # sigg = np.sqrt(sig)\r\n # disp = np.sum(sigg) / len(sig)\r\n \r\n # Again, with the power of NumPy, we can do elementwise calculations\r\n # Without ever needing to use a for loop. Occasionally, you will need\r\n # to specify an axis if you don't want to calculate something for the\r\n # whole array. 
Here, we need to specify 'axis=1' inside np.sum() to\r\n # make sure that it sums across (vx + vy + vz) versus summing down\r\n # each column (axis=0) or the whole thing (default behavior).\r\n\r\n sig = np.sqrt(np.sum(np.square(difference), axis=1))\r\n\r\n # dispersion1.insert(n,disp)\r\n # rad1.insert(n ,radii)\r\n # radii = radii + increment\r\n # n = n + 1\r\n \r\n # Finally, we can take our hard work and place it into the \r\n # dispersion array we created earlier, remembering that we chose\r\n # i to start at 1 and not 0.\r\n\r\n dispersion[i-1] = np.mean(sig)\r\n","sub_path":"old_shells.py","file_name":"old_shells.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"33803224","text":"from airflow import DAG\nfrom airflow.operators.dummy import DummyOperator\n\nfrom datetime import datetime\n\ndefault_args = {\n 'start_date': datetime(2021, 1, 1)\n}\n\nwith DAG('mktg_dag', tags=['marketing'], schedule_interval='@daily', \n default_args=default_args, catchup=False) as dag:\n\n task_a = DummyOperator(\n task_id=\"task_a\"\n )","sub_path":"dags/marketing_dag.py","file_name":"marketing_dag.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"183999710","text":"\"\"\"\nmódulo collections - Ordered Dict\n\nOrderedDict -> é um dicionario que garante a ordem de inserção dos elementos\n\n\"\"\"\n# # em um dicionário a ordem de inserção não é garantida.\n# dicionario = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}\n#\n# for chave, valor in dicionario.items():\n# print(f'chave = {chave}, valor = {valor}')\n\n# from collections import OrderedDict\n# dicionario = OrderedDict({'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5})\n# print(dicionario)\n#\n# for chave, valor in dicionario.items():\n# print(f'chave = {chave}, valor = {valor}')\n\n\n\n# entender a diferença entre Dict e Ordered Dict\nfrom collections import OrderedDict\n# Dicionários comum\n\nDict1 = {'a': 1, 'b': 2}\nDict2 = {'b': 2, 'a': 1}\n\nprint(Dict1 == Dict2) # True -> a Ordem dos elementos não importa para o dicionário\n\n# Ordered Dict\n\nodict1 = OrderedDict({'a': 1, 'b': 2})\nodict2 = OrderedDict({'b': 2, 'a': 1})\n\nprint(odict1 == odict2) # False -> A Ordem dos elementos importa para o OrderedDict\n\n","sub_path":"Cursos Python/Python 3 Básico ao avançado - Geek University/Ordered Dict.py","file_name":"Ordered Dict.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"345257865","text":"#!/usr/bin/env python3\n\nimport json\nimport PyPDF2\nimport difflib\nimport glob\nimport re\nimport sys\nimport requests\nimport urllib.parse\nfrom bs4 import BeautifulSoup\n\n'''\nProgram flow\n\ninput pdfs\nextract DOIs\nextract Titles\nrun doi2bib\nrun title2bib\nexport .bib file\n\ninput .bib file\nrun scihub2pdf\n???\nprofit\n'''\n\ndef get_pdf(doi: str):\n url = get_pdf_url(doi)\n title = get_title_from_doi(doi).replace(' ', '_')\n\n with open(title + \".pdf\", \"wb\") as f:\n r = requests.get(url)\n f.write(r.content)\n\ndef get_pdf_url(doi: str):\n search_params = urllib.parse.quote(f\"{doi}\", safe='')\n r = requests.get(\"https://sci-hub.se/\" + search_params)\n\n soup = BeautifulSoup(r.text, features='html.parser')\n pdf_iframe = soup.find(\"iframe\", id='pdf')\n if not pdf_iframe:\n raise PdfNotFound\n return pdf_iframe.attrs[\"src\"]\n\nclass 
PdfNotFound(Exception):\n pass\n\n\n# feature:fetch bibtex ref from doi\ndef get_bib_from_doi(doi: str):\n search_params = urllib.parse.quote(f\"{doi}\", safe='')\n r = requests.get(\"http://api.crossref.org/works/%s/transform/application/x-bibtex\" % search_params)\n if r.status_code == 200:\n return(r.text)\n\ndef get_doi_from_bib(bib: list):\n dois_to_send = {}\n for ent in bib:\n if ent.find(\"doi = {\") != -1:\n doi = re.sub(\"\\},\", '', ent[ent.find(\"doi = {\")+7:].strip(\",\")).strip()\n print(doi)\n dois_to_send[doi] = {}\n return json.dumps(dois_to_send)\n\n\ndef download_bib(articles: str) -> None:\n dois = json.loads(articles)\n\n for doi in dois.keys():\n try:\n print(get_bib_from_doi(doi))\n except PdfNotFound:\n print(f\"Article {doi} not found.\", file=sys.stderr)\n\nclass PdfNotFound(Exception):\n pass\n\n\n#input doi number\n#output title of article associated with doi\ndef get_title_from_doi(doi: str):\n url = \"http://api.crossref.org/works/\" + doi\n r = requests.get(url)\n rj = r.json()\n if (rj[\"status\"] == \"ok\"):\n return rj[\"message\"][\"title\"][0]\n return \"Error in reading DOI\"\n\ndef list_articles(path):\n pdfs = [x for x in glob.glob(path + \"/**/*.pdf\", recursive=True)]\n\n dois_to_send = {}\n\n for pdf in pdfs:\n dois_to_send[get_doi(pdf)] = get_pdf_info(pdf)\n\n\n return json.dumps(dois_to_send)\n\ndef get_pdf_info(pdf: str):\n '''Return a dictionary of additional infomation squeezed form the pdf.\n\n Arguments:\n pdf (str): the path to the pdf to examine\n\n Return:\n dict: additonal info of the given pdf\n Possible keys:\n title (str): the paper's title\n pages (int): number of pages in the document\n authours (str): the authors of paper in a comma seperated string\n publisher (str): document's publisher\n '''\n # TODO: get info like title, pages, whatever to help identify\n return {}\n\ndef download_articles(articles: str) -> None:\n '''Get the articles of a `list_articles` JSON dump.\n\n Arguments:\n articles (str): the recieved string of articles\n\n '''\n dois = json.loads(articles)\n\n for doi in dois.keys():\n try:\n get_pdf(doi)\n except PdfNotFound:\n print(f\"Article {doi} not found.\", file=sys.stderr)\n\nclass PdfNotFound(Exception):\n pass\n\n\n#input journal article title as string\n#output doi string associated with journal article\ndef get_doi_from_title(title: str):\n title = title.replace(\" \", \"+\")\n url = \"https://api.crossref.org/works?rows=5&query.title=\" + title\n r = requests.get(url).json()\n\n return r[\"message\"][\"items\"][0][\"DOI\"]\n\n# attempts to extract (filename, doi, title) from each argument (expects pdf).\ndef get_doi(pdf: str):\n '''Return the doi of the given pdf\n\n Arguments:\n pdf (str): the path to the pdf to examine\n\n Return:\n str: doi of the given document\n '''\n try:\n doc = PyPDF2.PdfFileReader(open(pdf, \"rb\"))\n except:\n return\n try:\n page = doc.getPage(0) # first page\n except:\n return\n text = page.extractText().replace('\\n', ' ')\n reg = re.compile(r\"doi[: ]*10.[\\d ]{4,}[\\S]*[/]+[^\\s]*\", re.I) # case insensitive match\n try:\n x = re.search(reg, text).group()\n x = x.replace(' ', '').lower()\n except:\n print(\"no regex match\")\n return ''\n if x.find(\"doi\") != -1: # doi substring will be stripped\n x = x[x.find(\"doi\")+3:].strip(':')\n return(x)\n\ndef get_info(doc):\n\tinfo = doc.getDocumentInfo()\n\tif info[\"/Title\"][0:5].lower() == \"title\":\n\t\treturn(\"None\")\n\treturn(info[\"/Title\"])\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print(f\"Usage: 
{sys.argv[0]} pdf\", file=sys.stderr)\n sys.exit(1)\n\n if sys.argv[1] == \"pdf2doi\": # extract dois from pdfs\n dois = list_articles(sys.argv[2])\n r = requests.post(\"http://127.0.0.1:5000/submit_dois\", data=dois)\n print(r.text)\n elif sys.argv[1] == \"pdf2bib\": # get bib refs from pdfs\n download_bib(list_articles(sys.argv[2]))\n elif sys.argv[1] == \"bib2pdf\": # get pdf from bib\n with open(sys.argv[2]) as s:\n dois = get_doi_from_bib(s.readlines())\n print(dois)\n download_articles(dois)\n elif sys.argv[1] == \"doi2pdf\": # get pdf from dois/titles\n r = requests.get('http://127.0.0.1:5000/get_pdfs/' + sys.argv[2])\n dois = r.text[:-1]\n download_articles(dois)\n\n else:\n print(f\"Usage: {sys.argv[0]} pdf\", file=sys.stderr)\n sys.exit(1)\n\n\n","sub_path":"doit.py","file_name":"doit.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"141747413","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport logging\n\ndef draw_polyline(img, vertices, color=[255, 0, 0], thickness=2, Closed = False):\n \"\"\"\n Simple method for drawing connected lines or polygons, given the\n set of points. Starting and ending point can be connected automatically\n to form closed polygon figure.\n \"\"\"\n cv2.polylines(img, vertices, Closed, color, thickness, lineType=cv2.LINE_AA)\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n \"\"\"\n Simple method for drawing set of individual lines, each defined by start and\n end points.\n \"\"\"\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef draw_label(img, text, pos, scale = 0.7, color = (0,0,0)):\n \"\"\"\n Method for displaying text on given part of the image.\n \"\"\"\n font_face = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(img, text, pos, font_face, scale, color, 1, cv2.LINE_AA)\n\ndef filter_image(img, l_thresh=(20, 100), s_thresh=(50, 120)):\n \"\"\"\n Taken from materials and modified.\n\n Performs image filtering based on the L and S channel (HLS), where each\n channel is filtered separately, thresholded, and then combined into single\n binary output. 
{"seq_id":"141747413","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport logging\n\ndef draw_polyline(img, vertices, color=[255, 0, 0], thickness=2, Closed = False):\n    \"\"\"\n    Simple method for drawing connected lines or polygons, given the\n    set of points. Starting and ending point can be connected automatically\n    to form closed polygon figure.\n    \"\"\"\n    cv2.polylines(img, vertices, Closed, color, thickness, lineType=cv2.LINE_AA)\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n    \"\"\"\n    Simple method for drawing set of individual lines, each defined by start and\n    end points.\n    \"\"\"\n    for line in lines:\n        for x1,y1,x2,y2 in line:\n            cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef draw_label(img, text, pos, scale = 0.7, color = (0,0,0)):\n    \"\"\"\n    Method for displaying text on given part of the image.\n    \"\"\"\n    font_face = cv2.FONT_HERSHEY_SIMPLEX\n    cv2.putText(img, text, pos, font_face, scale, color, 1, cv2.LINE_AA)\n\ndef filter_image(img, l_thresh=(20, 100), s_thresh=(50, 120)):\n    \"\"\"\n    Taken from materials and modified.\n\n    Performs image filtering based on the L and S channel (HLS), where each\n    channel is filtered separately, thresholded, and then combined into single\n    binary output. This is different from the material version, where the S binary\n    is directly combined with the Sobel binary.\n    \"\"\"\n    # Convert to HLS color space and separate the S channel\n    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n    l_channel = hls[:,:,1]\n    s_channel = hls[:,:,2]\n\n    # Sobel x, l channel\n    sobel_l = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x\n    abs_sobel_l = np.absolute(sobel_l) # Absolute x derivative to accentuate lines away from horizontal\n    scaled_sobel_l = np.uint8(255*abs_sobel_l/np.max(abs_sobel_l))\n\n    # Threshold x gradient\n    l_binary = np.zeros_like(scaled_sobel_l)\n    l_binary[(scaled_sobel_l >= l_thresh[0]) & (scaled_sobel_l <= l_thresh[1])] = 1\n\n    # Sobel x, s channel\n    sobel_s = cv2.Sobel(s_channel, cv2.CV_64F, 1, 0) # Take the derivative in x\n    abs_sobel_s = np.absolute(sobel_s) # Absolute x derivative to accentuate lines away from horizontal\n    scaled_sobel_s = np.uint8(255*abs_sobel_s/np.max(abs_sobel_s))\n\n    # Threshold x gradient\n    s_binary = np.zeros_like(scaled_sobel_s)\n    s_binary[(scaled_sobel_s >= s_thresh[0]) & (scaled_sobel_s <= s_thresh[1])] = 1\n\n    s_l_binary = np.zeros_like(s_binary)\n    s_l_binary[(s_binary == 1) | (l_binary == 1)] = 1\n\n    return s_l_binary\n
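# --- Added example (illustrative, synthetic data) ------------------------------
# A hedged sketch of the scaled-Sobel thresholding step that filter_image()
# above applies to each HLS channel: scale an absolute-gradient array to
# 0..255 and keep pixels inside a threshold band. The input array is random
# stand-in data, not a real Sobel response.
import numpy as np

abs_grad = np.abs(np.random.randn(8, 8))            # stand-in for |Sobel x|
scaled = np.uint8(255 * abs_grad / np.max(abs_grad))
binary = np.zeros_like(scaled)
binary[(scaled >= 20) & (scaled <= 100)] = 1        # same band as the l_thresh default
print(int(binary.sum()), "pixels pass the threshold band")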
\ndef find_pixels_mirror(binary_warped, params):\n    \"\"\"\n    Taken from materials.\n\n    Method performs initial lane detection by using the 'mirror' algorithm. Since\n    this method is likely resource intensive, it is only used in initial estimation\n    or as fallback when other methods fail.\n    \"\"\"\n    # Take a histogram of the bottom half of the image\n    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)\n    # Find the peak of the left and right halves of the histogram\n    # These will be the starting point for the left and right lines\n    midpoint = int(histogram.shape[0]//2) # plain int: np.int was removed from recent NumPy\n    leftx_base = np.argmax(histogram[:midpoint])\n    rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n\n    # HYPERPARAMETERS\n    # Choose the number of sliding windows\n    nwindows = params['nwindows'] #9\n    # Set the width of the windows +/- margin\n    margin = params['margin'] # 100\n    # Set minimum number of pixels found to recenter window\n    minpix = params['minpix'] # 50\n\n    # Set height of windows - based on nwindows above and image shape\n    window_height = int(binary_warped.shape[0]//nwindows)\n    # Identify the x and y positions of all nonzero pixels in the image\n    nonzero = binary_warped.nonzero()\n    nonzeroy = np.array(nonzero[0])\n    nonzerox = np.array(nonzero[1])\n    # Current positions to be updated later for each window in nwindows\n    leftx_current = leftx_base\n    rightx_current = rightx_base\n\n    # Create empty lists to receive left and right lane pixel indices\n    left_lane_inds = []\n    right_lane_inds = []\n\n    # Step through the windows one by one\n    for window in range(nwindows):\n        # Identify window boundaries in x and y (and right and left)\n        win_y_low = binary_warped.shape[0] - (window + 1)*window_height\n        win_y_high = binary_warped.shape[0] - window*window_height\n        win_xleft_low = leftx_current - margin\n        win_xleft_high = leftx_current + margin\n        win_xright_low = rightx_current - margin\n        win_xright_high = rightx_current + margin\n\n        # Identify the nonzero pixels in x and y within the window #\n        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n            (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n            (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n\n        # Append these indices to the lists\n        left_lane_inds.append(good_left_inds)\n        right_lane_inds.append(good_right_inds)\n\n        # If you found > minpix pixels, recenter next window on their mean position\n        if len(good_left_inds) > minpix:\n            leftx_current = int(np.mean(nonzerox[good_left_inds]))\n        if len(good_right_inds) > minpix:\n            rightx_current = int(np.mean(nonzerox[good_right_inds]))\n\n    # Concatenate the arrays of indices (previously was a list of lists of pixels)\n    try:\n        left_lane_inds = np.concatenate(left_lane_inds)\n        right_lane_inds = np.concatenate(right_lane_inds)\n    except ValueError:\n        # Avoids an error if the above is not implemented fully\n        pass\n\n    # Extract left and right line pixel positions\n    leftx = nonzerox[left_lane_inds]\n    lefty = nonzeroy[left_lane_inds]\n    rightx = nonzerox[right_lane_inds]\n    righty = nonzeroy[right_lane_inds]\n\n    return leftx, lefty, rightx, righty\n\ndef find_pixels_poly(binary_warped, left_fit, right_fit, params):\n    \"\"\"\n    Taken from materials and tailored for this pipeline.\n\n    Based on existing lane polynomials, we choose a region with given margin around\n    the polynomial curves. This region is then used to select candidate points for next\n    estimation. This algorithm is likely faster than the 'mirror' algorithm and is used\n    whenever possible.\n    \"\"\"\n    # HYPERPARAMETER\n    # Choose the width of the margin around the previous polynomial to search\n    margin = params['margin'] # 100\n\n    # Grab activated pixels\n    nonzero = binary_warped.nonzero()\n    nonzeroy = np.array(nonzero[0])\n    nonzerox = np.array(nonzero[1])\n\n    ### within the +/- margin of our polynomial function ###\n    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +\n                    left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +\n                    left_fit[1]*nonzeroy + left_fit[2] + margin)))\n    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +\n                    right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +\n                    right_fit[1]*nonzeroy + right_fit[2] + margin)))\n\n    # Again, extract left and right line pixel positions\n    leftx = nonzerox[left_lane_inds]\n    lefty = nonzeroy[left_lane_inds]\n    rightx = nonzerox[right_lane_inds]\n    righty = nonzeroy[right_lane_inds]\n\n    return leftx, lefty, rightx, righty\n
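# --- Added example (illustrative, synthetic data) ------------------------------
# A small sketch of the search-around-polynomial selection that find_pixels_poly()
# above performs: keep activated pixels whose x lies within +/- margin of the
# previous fit evaluated at their y. Coefficients, pixel column and margin are
# invented for illustration.
import numpy as np

fit = np.array([2e-4, -0.1, 300.0])   # hypothetical quadratic x = a*y^2 + b*y + c
ys = np.arange(0, 720)
xs = np.full_like(ys, 310)            # fake column of activated pixels
margin = 100
center = fit[0] * ys**2 + fit[1] * ys + fit[2]
keep = (xs > center - margin) & (xs < center + margin)
print(int(keep.sum()), "of", len(ys), "pixels fall inside the band")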
\ndef plot_debug(binary_warped, left_x_nonzero, left_y_nonzero, right_x_nonzero, right_y_nonzero,\n               left_fit_poly, right_fit_poly, margin):\n\n    \"\"\"\n    Taken from materials.\n\n    Visualization of relevant debug information, to better estimate the quality of the\n    lane detection pipeline.\n    \"\"\"\n    ## Visualization ##\n    # Create an image to draw on and an image to show the selection window\n    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n    window_img = np.zeros_like(out_img)\n    # Color in left and right line pixels\n    out_img[left_y_nonzero, left_x_nonzero] = [255, 0, 0]\n    out_img[right_y_nonzero, right_x_nonzero] = [0, 0, 255]\n\n    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )\n\n    try:\n        left_fitx = left_fit_poly[0]*ploty**2 + left_fit_poly[1]*ploty + left_fit_poly[2]\n        right_fitx = right_fit_poly[0]*ploty**2 + right_fit_poly[1]*ploty + right_fit_poly[2]\n    except TypeError:\n        # Avoids an error if `left` and `right_fit` are still none or incorrect\n        print('The function failed to fit a line!')\n        left_fitx = 1*ploty**2 + 1*ploty\n        right_fitx = 1*ploty**2 + 1*ploty\n\n    # Generate a polygon to illustrate the search window area\n    # And recast the x and y points into usable format for cv2.fillPoly()\n    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])\n    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])\n    left_line_pts = np.hstack((left_line_window1, left_line_window2))\n    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])\n    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])\n    right_line_pts = np.hstack((right_line_window1, right_line_window2))\n\n    # Draw the lane onto the warped blank image\n    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))\n    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))\n    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)\n\n    draw_left = (np.asarray([left_fitx, ploty]).T).astype(np.int32)\n    draw_right = (np.asarray([right_fitx, ploty]).T).astype(np.int32)\n\n    cv2.polylines(result, [draw_left], False, (255,0,0), thickness=5)\n    cv2.polylines(result, [draw_right], False, (255,0,0), thickness=5)\n\n    return result\n\ndef plot_lanes(undist, Minv, left_fit_poly, right_fit_poly):\n    \"\"\"\n    Taken from materials.\n\n    Final visualization of the lane lines.\n    \"\"\"\n    # Generate x and y values for plotting\n    img_shape = undist.shape\n\n    ploty = np.linspace(0, undist.shape[0]-1, undist.shape[0] )\n    try:\n        left_fitx = left_fit_poly[0]*ploty**2 + left_fit_poly[1]*ploty + left_fit_poly[2]\n        right_fitx = right_fit_poly[0]*ploty**2 + right_fit_poly[1]*ploty + right_fit_poly[2]\n    except TypeError:\n        # Avoids an error if `left` and `right_fit` are still none or incorrect\n        print('The function failed to fit a line!')\n        left_fitx = 1*ploty**2 + 1*ploty\n        right_fitx = 1*ploty**2 + 1*ploty\n\n    # Create an image to draw the lines on\n    warp_zero = np.zeros(img_shape[:2]).astype(np.uint8)\n    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n    # Recast the x and y points into usable format for cv2.fillPoly()\n    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n    pts = np.hstack((pts_left, pts_right))\n\n    # Draw the lane onto the warped blank image\n    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n\n    draw_left = (np.asarray([left_fitx, ploty]).T).astype(np.int32)\n    draw_right = (np.asarray([right_fitx, ploty]).T).astype(np.int32)\n    cv2.polylines(color_warp, [draw_left], False, (255,0,0), thickness=5)\n    cv2.polylines(color_warp, [draw_right], False, (255,0,0), thickness=5)\n\n    # Warp the blank back to original image space using inverse perspective matrix (Minv)\n    newwarp = cv2.warpPerspective(color_warp, Minv, (img_shape[1], img_shape[0]))\n\n    # Combine the result with the original image\n    result = cv2.addWeighted(undist, 1, newwarp, 0.5, 0)\n\n    return result\n\ndef plot_poly(ploty, poly):\n    \"\"\"\n    Taken from the materials and modified.\n\n    Returns a set of plotx points calculated from the polynomial and input ploty data.\n    \"\"\"\n\n    fit_success = False\n\n    try:\n        plotx = poly[0]*ploty**2 + poly[1]*ploty + poly[2]\n        fit_success = True\n    except TypeError:\n        # Avoids an error if poly is still none or incorrect\n        print('The function failed to fit a line!')\n        plotx = 1*ploty**2 + 1*ploty\n\n    return plotx, fit_success\n
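# --- Added example (illustrative, synthetic data) ------------------------------
# A tiny round-trip check of the fit/evaluate pattern used by plot_poly() above
# and fit_poly_to_points() below: np.polyfit recovers the coefficients of a
# quadratic and evaluating them reproduces the points. The curve is invented.
import numpy as np

y = np.linspace(0, 719, 720)
x = 1e-4 * y**2 - 0.05 * y + 320            # invented "lane" points
coeffs = np.polyfit(y, x, 2)                # fit x = a*y^2 + b*y + c
x_hat = coeffs[0] * y**2 + coeffs[1] * y + coeffs[2]
print(np.allclose(x, x_hat))                # -> True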
\ndef fit_poly_to_points(x, y):\n    \"\"\"\n    Taken from the materials.\n\n    Based on the detected points, calculate polynomials of the lane curve.\n    \"\"\"\n    fit_success = True\n\n    try:\n        fit = np.polyfit(x, y, 2)\n    except np.RankWarning:\n        # If polyfit fails, return the coefficients of the x = 0 line\n        fit = [0, 0, 0]\n        fit_success = False\n\n    return fit, fit_success\n\ndef fit_poly_to_lanes(warped_binary):\n    \"\"\"\n    Procedure for detecting road lanes based on the binary pixel data, obtained by filtering and\n    warping each recorded frame.\n    \"\"\"\n\n    import globals\n    lane_params = globals.lane_params\n\n    # Fetch previously detected lanes\n    lanes = lane_params.detected_lanes\n\n    # Current lane\n    current_lane = globals.Lane_fits()\n    lanes_length = len(lanes)\n\n    if lanes_length == 0:\n        # Try new mirror detection sequence\n        leftx, lefty, rightx, righty = find_pixels_mirror(warped_binary, lane_params.find_pixels_mirror)\n\n    else:\n        # Use previous best fit to define fit area\n        average_lane = lane_params.best_fit\n        leftx, lefty, rightx, righty = find_pixels_poly(warped_binary, average_lane.left_fit,\n                average_lane.right_fit, lane_params.find_pixels_poly)\n\n    # Calculate polynomial from detected points\n    left_fit, left_fit_success = fit_poly_to_points(lefty, leftx)\n    right_fit, right_fit_success = fit_poly_to_points(righty, rightx)\n    fit_success = left_fit_success & right_fit_success\n\n    current_lane.left_fit = left_fit\n    current_lane.right_fit = right_fit\n    current_lane.fit_success = fit_success\n\n    if (not fit_success) and (lanes_length == 0):\n        logging.warning('Lane detection not successful.')\n\n    if current_lane.fit_success:\n        lanes.insert(0, current_lane)\n\n    # Best fit\n    best_fit = globals.find_lane_average(lanes)\n    lane_params.best_fit = best_fit\n\n    if len(lanes) > lane_params.lane_count:\n        lanes.pop()\n\n    return leftx, lefty, rightx, righty, best_fit\n\ndef radius_measurements(left_fit, right_fit, lane_params):\n    '''\n    Taken from the materials and adapted for the pipeline.\n\n    Calculates the radius of the curvature of the lanes in [m].\n    '''\n    # Define y-value where we want radius of curvature\n    # We'll choose the maximum y-value, corresponding to the bottom of the image\n\n    # Lambda for calculating curvature radius in pixels\n    # Convert from lines measured in pixels to lines measured in meters\n    xm_per_pix = lane_params.xm_per_pix\n    ym_per_pix = lane_params.ym_per_pix\n\n    # The bottom left point of the region, at the same time the lowest point of the curves\n    y_pix = lane_params.lane_region[0][1]\n    y_m = y_pix * ym_per_pix\n\n    left_fit_m = left_fit * [xm_per_pix/ym_per_pix**2, xm_per_pix/ym_per_pix, xm_per_pix]\n    right_fit_m = right_fit * [xm_per_pix/ym_per_pix**2, xm_per_pix/ym_per_pix, xm_per_pix]\n\n    curv = lambda a, b, y : (1 + (2*a*y + b)**2)**(1.5) / np.abs(2*a)\n\n    left_curverad = curv(left_fit_m[0], left_fit_m[1], y_m)\n    right_curverad = curv(right_fit_m[0], right_fit_m[1], y_m)\n\n    return left_curverad, right_curverad\n
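# --- Added example (illustrative, invented values) -------------------------------
# A worked check of the curvature formula used in radius_measurements() above:
# R = (1 + (2*a*y + b)^2)^(3/2) / |2*a| for a lane modelled as x = a*y^2 + b*y + c.
# The coefficients and y value are hypothetical metric-space numbers.
import numpy as np

a, b, y = 1e-4, -0.05, 30.0
radius = (1 + (2 * a * y + b) ** 2) ** 1.5 / np.abs(2 * a)
print(round(radius, 1), "m")  # ~5014.5 m for these invented values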
\ndef position_measurement(left_fit, right_fit, lane_params):\n    '''\n    Taken from the materials and adapted for the pipeline.\n\n    Calculates the vehicle offset from the middle of the lane in [m].\n    '''\n    # Convert from lines measured in pixels to lines measured in meters\n    xm_per_pix = lane_params.xm_per_pix\n    ym_per_pix = lane_params.ym_per_pix\n\n    # The bottom left point of the region, at the same time the lowest point of the curves\n    y_pix = lane_params.lane_region[0][1]\n    y_m = y_pix * ym_per_pix\n\n    left_fit_m = left_fit * lane_params.transform_poly_2_m\n    right_fit_m = right_fit * lane_params.transform_poly_2_m\n\n    # Calculate position from middle of the lane\n    left_curve_pos = left_fit_m[0]*y_m**2 + left_fit_m[1]*y_m + left_fit_m[2]\n    right_curve_pos = right_fit_m[0]*y_m**2 + right_fit_m[1]*y_m + right_fit_m[2]\n\n    lane_middle_pos = (left_curve_pos + right_curve_pos) / 2\n    image_middle_pos = lane_params.img_shape[1] * xm_per_pix / 2\n\n    # Since x values grow to the right, positive values here mean vehicle is shifted to\n    # the right of the lane middle\n    vehicle_pos = image_middle_pos - lane_middle_pos\n\n    return vehicle_pos","sub_path":"helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":16602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"296812505","text":"import sys\n\n\nclass Solution(object):\n    \n    def minSteps(self, n):\n        if n == 1:\n            return 0\n        if (is_prime(n)):\n            return n\n        else:\n            factor_pairs = f(n)\n            min_value = sys.maxsize # Python 3: sys.maxint no longer exists\n            for pair in factor_pairs:\n                if pair[0] == n or pair[1] == n:\n                    continue\n                current_value = self.minSteps(pair[0]) + self.minSteps(pair[1])\n                if current_value < min_value:\n                    min_value = current_value\n            return min_value\n    \ndef f(val):\n    return [(i, val // i) for i in range(1, int(val**0.5)+1) if val % i == 0] # floor division keeps factors integral\n\ndef is_prime(n):\n    '''check if integer n is a prime'''\n\n    # make sure n is a positive integer\n    n = abs(int(n))\n\n    # 0 and 1 are not primes\n    if n < 2:\n        return False\n\n    # 2 is the only even prime number\n    if n == 2: \n        return True \n\n    # all other even numbers are not primes\n    if not n & 1: \n        return False\n\n    # range starts with 3 and only needs to go up \n    # the square root of n for all odd numbers\n    for x in range(3, int(n**0.5) + 1, 2):\n        if n % x == 0:\n            return False\n\n    return True","sub_path":"python/LeetCode/minsteps.py","file_name":"minsteps.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
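# --- Added example (not part of the dataset records) ----------------------------
# A quick worked check of the idea behind the minsteps record above: the minimum
# number of copy/paste steps for n is the sum of n's prime factors (a standard
# result for this problem), so n = 6 = 2*3 needs 2 + 3 = 5 steps. This helper is
# a sketch written for illustration, not code from the source file.
def sum_prime_factors(n):
    total, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            total += d
            n //= d
        d += 1
    if n > 1:          # leftover prime factor
        total += n
    return total

print(sum_prime_factors(6))   # -> 5
print(sum_prime_factors(9))   # -> 6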
{"seq_id":"482821358","text":"from nltk.classify import NaiveBayesClassifier\nfrom nltk.corpus import subjectivity\nfrom nltk.sentiment import SentimentAnalyzer\nfrom nltk.sentiment.util import *\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.stem.porter import PorterStemmer\n#import csv\nimport pandas as pd\n#nltk.download('subjectivity')\n#nltk.download('stopwords')\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\n\n\"happy, sad, angry, calm\"\n\n\"\"\"\nFunction to reduce the number of words in the program's dictionary. \nConverts sentence to a list of lemmatized lowercase words.\n\"\"\"\n\n\nsentences = [\"VADER is smart, handsome, and funny.\", # positive sentence example\n   \"VADER is smart, handsome, and funny!\", # punctuation emphasis handled correctly (sentiment intensity adjusted)\n   \"VADER is very smart, handsome, and funny.\", # booster words handled correctly (sentiment intensity adjusted)\n   \"VADER is VERY SMART, handsome, and FUNNY.\", # emphasis for ALLCAPS handled\n   \"VADER is VERY SMART, handsome, and FUNNY!!!\",# combination of signals - VADER appropriately adjusts intensity\n   \"VADER is VERY SMART, really handsome, and INCREDIBLY FUNNY!!!\",# booster words & punctuation make this close to ceiling for score\n   \"The book was good.\", # positive sentence\n   \"The book was kind of good.\", # qualified positive sentence is handled correctly (intensity adjusted)\n   \"The plot was good, but the characters are uncompelling and the dialog is not great.\", # mixed negation sentence\n   \"A really bad, horrible book.\", # negative sentence with booster words\n   \"At least it isn't a horrible book.\", # negated negative sentence with contraction\n   \":) and :D\", # emoticons handled\n   \"\", # an empty string is correctly handled\n   \"Today sux\", # negative slang handled\n   \"Today sux!\", # negative slang with punctuation emphasis handled\n   \"Today SUX!\", # negative slang with capitalization emphasis\n   \"Today kinda sux! But I'll get by, lol\" # mixed sentiment example with slang and contrastive conjunction \"but\"\n]\ntricky_sentences = [\n    \"Most automated sentiment analysis tools are shit.\",\n    \"VADER sentiment analysis is the shit.\",\n    \"Sentiment analysis has never been good.\",\n    \"Sentiment analysis with VADER has never been this good.\",\n    \"Warren Beatty has never been so entertaining.\",\n    \"I won't say that the movie is astounding and I wouldn't claim that \\\n    the movie is too banal either.\",\n    \"I like to hate Michael Bay films, but I couldn't fault this one\",\n    \"It's one thing to watch an Uwe Boll film, but another thing entirely \\\n    to pay for it\",\n    \"The movie was too good\",\n    \"This movie was actually neither that funny, nor super witty.\",\n    \"This movie doesn't care about cleverness, wit or any other kind of \\\n    intelligent humor.\",\n    \"Those who find ugly meanings in beautiful things are corrupt without \\\n    being charming.\",\n    \"There are slow and repetitive parts, BUT it has just enough spice to \\\n    keep it interesting.\",\n    \"The script is not fantastic, but the acting is decent and the cinematography \\\n    is EXCELLENT!\",\n    \"Roger Dodger is one of the most compelling variations on this theme.\",\n    \"Roger Dodger is one of the least compelling variations on this theme.\",\n    \"Roger Dodger is at least compelling as a variation on the theme.\",\n    \"they fall in love with the product\",\n    \"but then it breaks\",\n    \"usually around the time the 90 day warranty expires\",\n    \"the twin towers collapsed today\",\n    \"However, Mr. 
Carter solemnly argues, his client carried out the kidnapping \\\n    under orders and in the ''least offensive way possible.''\"\n]\nsentences.extend(tricky_sentences) # extend, not append: append would nest the list and break polarity_scores below\nvader = SentimentIntensityAnalyzer()\nfor sentence in sentences:\n    valence = vader.polarity_scores(sentence)['compound']\n    print(sentence)\n    print(\"vader: \", valence)\n\ndef sentenceToFeatures(sentence):\n    lem = WordNetLemmatizer()\n    txt = []\n    for word in nltk.word_tokenize(sentence):\n        word = word.lower()\n        word = lem.lemmatize(word, \"v\")\n        if word not in stopwords.words(\"english\"):\n            txt.append(word)\n    # Would be time efficient to add words to dictionary here,\n    # but to keep this function more general I will not.\n    #dictionary.add(word)\n    return txt\n\nvalence_docs = []\narousal_docs = []\nsentiment_docs = []\ntest_docs = []\ndictionary = set()\npv = 0\nnv = 0\npa = 0\nna = 0\n\ndef categorizeSentiment(v, a):\n    sent = \"\"\n    if v <= 0 and a <= 0:\n        sent = \"sad\"\n    if v <= 0 and a > 0:\n        sent = \"angry\"\n    if v > 0 and a <= 0:\n        sent = \"calm\"\n    if v > 0 and a > 0:\n        sent = \"happy\"\n    return sent\n\ndef addData(text, v, a):\n    try:\n        sent = categorizeSentiment(v, a)\n        txt = sentenceToFeatures(text)\n        for word in txt:\n            dictionary.add(word)\n        #test_docs.append(text)\n        valence_docs.append((txt, v))\n        arousal_docs.append((txt, a))\n        sentiment_docs.append((txt, sent))\n    except:\n        print(\"Failed to add text: \", text)\n\nprint(\"Reading data...\")\neb = pd.read_csv('emobank.csv', index_col=0)\nnrc = pd.read_csv('NRC-VAD-Lexicon.txt', delim_whitespace=True, error_bad_lines=False)\n\n#read nrc\nfor index, row in nrc.iterrows():\n    word = row[0]\n    v = float(row[1])\n    a = float(row[2])\n    v = v * 10 - 5\n    a = a * 10 - 5\n    if v >= 0:\n        pv += 1\n    else:\n        nv += 1\n    if a >= 0:\n        pa += 1\n    else:\n        na += 1\n    #addData(word, v, a)\n\n#read emobank\ntest_count = 100\nfor index, row in eb.iterrows():\n    v = float(row[\"V\"])\n    a = float(row[\"A\"])\n    text = row[\"text\"]\n    # change from 6 pt positive scale to 10 point scale centered at 0\n    v = v * 10 / 6 - 5\n    a = a * 10 / 6 - 5\n    if v >= 0:\n        pv += 1\n    else:\n        nv += 1\n    if a >= 0:\n        pa += 1\n    else:\n        na += 1\n\n    test_count += 1\n    if test_count % 100 == 0:\n        test_docs.append(text)\n    addData(text, v, a)\n\n\nprint(\"Number of entries with positive or neutral Valence: \", pv)\nprint(\"Number of entries with negative Valence: \", nv)\nprint(\"Number of entries with positive or neutral Arousal: \", pa)\nprint(\"Number of entries with negative Arousal: \", na)\nprint(\"Number of words in dictionary: \", len(dictionary))\nprint(\"Number of test docs: \", len(test_docs))\nprint(\"Building training sets...\")\nvalence_training_set = [({word: (word in (x[0])) for word in dictionary}, x[1]) for x in valence_docs]\narousal_training_set = [({word: (word in (x[0])) for word in dictionary}, x[1]) for x in arousal_docs]\nsentiment_training_set = [({word: (word in (x[0])) for word in dictionary}, x[1]) for x in sentiment_docs]\n\nprint(\"Training Valence Classifier...\")\nvalence_classifier = NaiveBayesClassifier.train(valence_training_set)\n\nprint(\"Training Arousal Classifier...\")\narousal_classifier = NaiveBayesClassifier.train(arousal_training_set)\n\nprint(\"Training Sentiment Classifier...\")\nsentiment_classifier = NaiveBayesClassifier.train(sentiment_training_set)\n\nprint(\"Testing...\")\n\n
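# --- Added example (toy vocabulary, illustrative only) ---------------------------
# A small sketch of the presence-feature dictionaries that the training and test
# sets above are built from: every dictionary word is mapped to True/False
# depending on whether it occurs in the tokenized sentence.
dictionary_demo = {"good", "bad", "movie"}
tokens = ["good", "movie"]
features = {word: (word in tokens) for word in dictionary_demo}
print(features)  # e.g. {'movie': True, 'good': True, 'bad': False}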
\nfor sentence in sentences:\n    test_data_features = {word: (word in sentenceToFeatures(sentence)) for word in dictionary}\n    print(\"Sentence: \", sentence)\n    print(\"Valence: \", valence_classifier.classify(test_data_features))\n    print(\"Arousal: \", arousal_classifier.classify(test_data_features))\n    print(\"Sentiment: \", sentiment_classifier.classify(test_data_features))\n\n\n\nfor sentence in test_docs:\n    test_data_features = {word: (word in sentenceToFeatures(sentence)) for word in dictionary}\n    print(\"Sentence: \", sentence)\n    print(\"Valence: \", valence_classifier.classify(test_data_features))\n    print(\"Arousal: \", arousal_classifier.classify(test_data_features))\n    print(\"Sentiment: \", sentiment_classifier.classify(test_data_features))\n","sub_path":"Sentiment.py","file_name":"Sentiment.py","file_ext":"py","file_size_in_byte":7857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"602664496","text":"from collections import deque\nimport datetime\n\n\nfrom qstrader import settings\nfrom qstrader.event import SignalEvent, EventType\nfrom qstrader.compat import queue\nfrom wrappers.forex_trading_session import ForexTradingSession\n\nfrom lib.com import *\n\n\ndef run(strategy, tickers, start_date, end_date, initial_equity, testing=False):\n    # Backtest information\n    strategy_name = strategy.__class__.__name__\n    title = ['forex-%s' % (strategy_name)]\n    \n    backtest = ForexTradingSession(\n        strategy, tickers,\n        initial_equity, start_date, end_date,\n        title=title\n    )\n    session_id = backtest.trade_session_id\n    results = backtest.start_trading(testing=testing, \n                outfile=\"%s/%s-%s.png\" % (config.OUTPUT_DIR,\n                strategy_name, str(session_id)))\n    return results\n\n\nif __name__ == \"__main__\":\n    from strategies.simple_moving_average_cross import SimpleMovingAverageCrossStrategy\n    # Configuration data\n    testing = False\n    start_date = datetime.datetime(2016, 8, 4)\n    end_date = datetime.datetime(2017, 8, 4)\n    initial_equity = 10000.0\n    tickers = [\"USD_JPY-H1\"]\n    sma = SimpleMovingAverageCrossStrategy()\n    \n    run(sma, tickers, start_date, end_date, initial_equity, testing) # the stray \"demo\" argument did not match run()'s signature\n    \n    \n","sub_path":"minetrader/backtests/general_backtest.py","file_name":"general_backtest.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"35629867","text":"__author__ = 'Mikhail'\nimport numpy as np\n\n\ndef north_west_method(a, b):\n\tx = np.matrix(np.zeros((a[0].size, b[0].size)))\n\tj, i = 0, 0\n\tB = []\n\twhile i in range(a[0].size):\n\t\tm = min(a[0, i], b[0, j])\n\t\ta[0, i] -= m\n\t\tb[0, j] -= m\n\t\tx[i, j] = m\n\t\tB.append((i, j))\n\t\tif a[0, i] == 0:\n\t\t\ti += 1\n\t\tif b[0, j] == 0:\n\t\t\tj += 1\n\tanswer = {'B': B, 'x': x}\n\treturn answer\n\n\ndef compute_u_v(B, c):\n\tu_count = c[0].size - 1\n\tv_count = c[:, 0].size\n\ta = np.array(np.zeros((len(B), u_count + v_count)))\n\tb = np.array(np.zeros(len(B)))\n\tfor i in range(len(B)):\n\t\tif B[i][0] != 0:\n\t\t\ta[i, B[i][0] - 1] = 1\n\t\ta[i, u_count + B[i][1]] = 1\n\t\tb[i] = c[B[i][0], B[i][1]]\n\tsolved_u_v = np.linalg.solve(a, b)\n\n\tu = [0]\n\tfor i in range(u_count):\n\t\tu.append(solved_u_v[i])\n\n\tv = []\n\tfor i in range(u_count, u_count + v_count):\n\t\tv.append(solved_u_v[i])\n\n\treturn {'u': u, 'v': v}\n\n\ndef getNeighbors(node, seq):\n\tneighbors = []\n\trow = False\n\tcolomn = False\n\tfor j in seq:\n\t\tif j[0] == node[0] and not colomn:\n\t\t\tneighbors.append(j)\n\t\t\tcolomn = True\n\t\tif j[1] == node[1] and not row:\n\t\t\tneighbors.append(j)\n\t\t\trow = True\n\treturn neighbors\n\n\ndef createPath(h, node, path):\n\tfor i in 
h[node]:\n\t\tif i not in path:\n\t\t\tpath.append(i)\n\t\t\treturn createPath(h, i, path)\n\treturn path\n\n\ndef find_cycle(B, start_node):\n\tseq = set(B)\n\th = {}\n\tfor i in range(len(B)):\n\t\tseq.remove(B[i])\n\t\tnodes = getNeighbors(B[i], seq)\n\t\tif len(nodes) > 1:\n\t\t\tseq.add(B[i])\n\t\t\th.setdefault(B[i], nodes)\n\treturn createPath(h, start_node, [start_node])\n\n\ndef iterate(x, B, c):\n\tu_v = compute_u_v(B, c)\n\tu = u_v['u']\n\tv = u_v['v']\n\n\t# calculate N\n\tN = []\n\tfor i in range(c[0].size):\n\t\tfor j in range(c[:, 0].size):\n\t\t\tif (i, j) not in B:\n\t\t\t\tN.append((i, j))\n\n\t# check u+v < c\n\tfor i in range(len(N)):\n\t\tif u[N[i][0]] + v[N[i][1]] > c[N[i][0], N[i][1]]:\n\n\t\t\t# () Add -> B\n\t\t\tB.append(N[i])\n\t\t\tB.sort()\n\n\t\t\t# calculate cycle\n\t\t\tpath = find_cycle(B, N[i])\n\n\t\t\t# find min node of cycle and it value\n\t\t\tmin_node = path[1]\n\t\t\tfor i in range(1, int(len(path) / 2)):\n\t\t\t\tif x[min_node[0], min_node[1]] > x[path[i * 2 + 1][0], path[i * 2 + 1][1]]:\n\t\t\t\t\tmin_node = path[i * 2 + 1]\n\n\t\t\tmin_value = x[min_node[0], min_node[1]]\n\n\n\t\t\t# update X\n\t\t\tfor i in range(len(path)):\n\t\t\t\tif i % 2:\n\t\t\t\t\tx[path[i][0], path[i][1]] -= min_value\n\t\t\t\telse:\n\t\t\t\t\tx[path[i][0], path[i][1]] += min_value\n\n\n\t\t\t# remove min mod from B\n\t\t\tB.remove(min_node)\n\n\n\t\t\treturn iterate(x, B, c)\n\treturn x\n\n\na = np.matrix([70, 80, 70])\nb = np.matrix([100, 60, 60])\nc = np.matrix([[5, 2, 8], [2, 1, 6], [7, 5, 4]])\n\n# a = np.matrix([80, 80, 90])\n# b = np.matrix([50, 100, 100])\n# c = np.matrix([[1, 1, 6], [10, 3, 5], [4, 5, 8]])\n\n# a = np.matrix([50, 50, 100])\n# b = np.matrix([40, 90, 70])\n# c = np.matrix([[2, 5, 3], [4, 3, 2], [5, 1, 2]])\n\n_x = north_west_method(a, b)\nB = _x['B']\nx = _x['x']\n# print(x)\n\nprint(iterate(x, B, c))\n","sub_path":"kurs_3/sem_2/moiu/lb/fourth/TMT.py","file_name":"TMT.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"247776579","text":"#!/bin/python3\n\"\"\"\nYou are given an unordered array consisting of consecutive integers without duplicates.\nFind the minimum number of swaps required to sort the array in ascending order.\n\"\"\"\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# Workings:\n# First, swap any direct pairs (i.e. 
arr[a] == b and arr[b] == a).\n# Then, put elements in the correct place from left to right.\ndef minimumSwaps(arr):\n swaps = 0\n # Find any direct pair swaps.\n for i in range(0, len(arr)):\n position = i+1\n value = arr[i]\n if (arr[value - 1] == position) and not (position == value):\n arr[i] = position\n arr[value - 1] = value\n swaps = swaps + 1\n # Make swaps to correct list from left to right.\n for i in range(0, len(arr)):\n position = i+1\n value = arr[i]\n if not (position == value):\n toSwap = arr.index(position)\n arr[i] = arr[toSwap]\n arr[toSwap] = value\n swaps = swaps + 1\n return swaps\n\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n res = minimumSwaps(arr)\n\n fptr.write(str(res) + '\\n')\n\n fptr.close()\n","sub_path":"interview-preparation-kit/arrays/minimum-swaps-2.py","file_name":"minimum-swaps-2.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"403639069","text":"#!/usr/bin/env python\nimport sys\nimport rospy\n\nfrom phantomx_gazebo.phantomx import PhantomX\n\nstate_ = 0\nstate_dict_ = {\n 0: 'turn right',\n 1: 'turn left',\n 2: 'go forward'\n}\n\ndef change_state(state):\n global state_, state_dict_\n if state is not state_:\n #print 'Hexabot - [%s] - %s' % (state, state_dict_[state])\n state_ = state\n\ndef take_action():\n regions = regions_\n robot.set_walk_velocity(0, 0, 0)\n \n state_description = ''\n \n d = 2.\n \n if regions['right'] < 0.7:\n state_description = 'case 0 - right'\n change_state(1)\n elif regions['front'] > d and regions['fleft'] > d and regions['fright'] > d:\n state_description = 'case 1 - nothing'\n change_state(0)\n elif regions['front'] < d and regions['fleft'] > d and regions['fright'] > d:\n state_description = 'case 2 - front'\n change_state(1)\n elif regions['front'] > d and regions['fleft'] > d and regions['fright'] < d:\n state_description = 'case 3 - fright'\n change_state(2)\n elif regions['front'] > d and regions['fleft'] < d*1.5 and regions['fright'] > d:\n state_description = 'case 4 - fleft'\n change_state(0)\n elif regions['front'] < d and regions['fleft'] > d and regions['fright'] < d:\n state_description = 'case 5 - front and fright'\n change_state(1)\n elif regions['front'] < d and regions['fleft'] < d and regions['fright'] > d:\n state_description = 'case 6 - front and fleft'\n change_state(0)\n elif regions['front'] < d and regions['fleft'] < d and regions['fright'] < d:\n state_description = 'case 7 - front and fleft and fright'\n change_state(1)\n elif regions['front'] > d and regions['fleft'] < d and regions['fright'] < d:\n state_description = 'case 8 - fleft and fright'\n change_state(0)\n else:\n state_description = 'unknown case'\n rospy.loginfo(regions)\n\ndef turn_right():\n robot.set_walk_velocity(1, 0, -1)\n rospy.sleep(0.2)\n\ndef turn_left():\n robot.set_walk_velocity(0.5, 0, 0.5)\n rospy.sleep(0.2)\n\ndef go_forward():\n \n robot.set_walk_velocity(1, 0, 0)\n rospy.sleep(0.2)\n\nif __name__ == '__main__':\n rospy.init_node('wall_follower')\n\n rospy.loginfo('Instantiating robot Client')\n robot = PhantomX()\n rospy.sleep(1)\n\n rospy.loginfo('Cave Exploration Starting')\n\n #print robot.lidar_ranges[180] #180=front, 270=left, 90=right\n\n rate = rospy.Rate(25)\n while not rospy.is_shutdown():\n global regions_\n regions_ = {\n 'left': min(min(robot.lidar_ranges[680:720]), 10),\n 'fleft': 
min(min(robot.lidar_ranges[380:680]), 10),\n        'front': min(min(robot.lidar_ranges[340:380]), 10),\n        'fright': min(min(robot.lidar_ranges[40:340]), 10),\n        'right': min(min(robot.lidar_ranges[0:40]), 10)\n        }\n        take_action()\n\n        if state_ == 0:\n            turn_right()\n        elif state_ == 1:\n            turn_left()\n        elif state_ == 2:\n            go_forward()\n        else:\n            rospy.logerr('Unknown state')\n\n    rospy.loginfo('Cave Exploration Finished')\n","sub_path":"workspaceRos/src/phantomx/phantomx_control/src/wall_follower.py","file_name":"wall_follower.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"516883923","text":"import pandas as pd\r\nimport glob\r\npd.set_option(\"display.max_columns\", 100)\r\npd.set_option(\"display.max_rows\", 1000)\r\npd.set_option(\"display.width\", 600)\r\n\r\n\"\"\"Base directory: this script must sit at the same level as this folder.\r\nThis script counts the downloaded .parquet files to verify that everything was downloaded.\r\nThe results are written to a .txt file.\"\"\"\r\n\r\nDic1 = 'devicetype=Air-Conditioner'\r\n\r\n\"\"\"Select the month to analyze: \r\n    e.g. April 1st through the 30th (with the range end set to 31)\"\"\"\r\n\r\nmonth = '04'\r\n\r\nf = open(\"filenumber_{}.txt\".format(month), 'w')\r\n\r\nfor i in range(1, 31):\r\n    if i < 10:\r\n        Dic2 = 'date=2020-{}-0{}'.format(month, i)\r\n    elif i >= 10:\r\n        Dic2 = 'date=2020-{}-{}'.format(month, i)\r\n    files = glob.glob(Dic1 + '/' + Dic2 + '/*.parquet')\r\n    text = \"[{}] : {}/{}\\n\".format(len(files), Dic1, Dic2)\r\n    f.write(text)\r\n    print(text)\r\nf.close()","sub_path":"DataPreprocessing/2.parquet_numbering.py","file_name":"2.parquet_numbering.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"602431864","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\nimport urx \nimport chess \nimport sys\nfrom PyQt5.QtWidgets import QWidget, QAction, qApp, QApplication, QLabel,QGridLayout\nfrom PyQt5.QtGui import QIcon, QPixmap\nfrom PyQt5.Qt import QTextEdit, QVBoxLayout, QPushButton, QHBoxLayout\nimport InitScreen\nimport fenInit\nfrom Threads import *\nfrom PyQt5 import QtCore\nfrom dataStructers import camId as id\n#from engine import Engine\n\nclass MainGame(QWidget):\n    \"\"\"It's a game class.\"\"\"\n    def __init__(self,debug,playersNum):\n        super().__init__()\n        self.debug = debug\n        #self.camera = cv2.VideoCapture(id+700)\n\n        #self.connect2Robot()\n\n        if self.debug:\n            self.debug = debug\n            print('this is debug mode')\n        else:\n            print('all fine')\n        self.robot = FakeRobot('ip',1)\n        dialog = fenInit.InitScreen(self.robot)\n        dialog.exec()\n        dialog = InitScreen.InitScreen(0, self.robot)\n        dialog.exec()\n\n        self.playersNum = dialog.playersNum\n        self.restartStatus = [True,True,True]\n        self.difficulties =dialog.plDifficult\n        self.aiDifficulties=dialog.aiDifficult\n        self.players = dialog.isPlayer\n        self.initDone = False\n        self.curPlayer = [0,1]\n        self.curInt = 0\n        self.loopDone = [True,True,True]\n        self.RobotMoveDone = [True,True,True]\n        self.GenerateMoveDone = [True,True,True,True]\n        self.playerPhotoStatus=[True,True,True]\n        self.InitUI(False)\n        self.CheckPermission = [False,False,False]\n        self.InitDraw()\n\n    def connect2Robot(self):\n        #self.robot = urx.Robot(\"192.168.0.20\", use_rt=True)\n        print(self.robot)\n        for i in range(7):\n            self.robot.set_digital_out(i, False)\n        self.robot.movej(dataStructers.playerOneJPose,vel=0.6,acc=0.6)\n        return(self.robot)\n\n    def InitUI(self,old):\n\n        if old==False:\n\n            self.buttonsPause=[]\n            self.buttonsPhoto=[]\n            self.buttonsReset=[]\n            
self.buttonsStop=[]\n self.playerNamesLabels = []\n self.aiNamesLabels = []\n self.chessboardDisplays = []\n self.turn = ['w','w','w']\n self.curTurn = []\n self.logs = []\n\n for i in range(self.playersNum):\n\n self.playerNamesLabels.append(QLabel(self))\n if self.players[i]==True:\n self.playerNamesLabels[i].setText(\"Player \\nPlayer \"+str(i+1))\n else:\n self.playerNamesLabels[i].setText(\"Player \\n Ai:\"+str(self.aiDifficulties[i]))\n self.playerNamesLabels[i].resize(80,40)\n self.playerNamesLabels[i].move(4+500*i,0)\n\n self.aiNamesLabels.append(QLabel(self))\n self.aiNamesLabels[i].setText(\"Robot\\n AI:\"+ str(self.difficulties[i]))\n self.aiNamesLabels[i].resize(80,40)\n self.aiNamesLabels[i].move(4+500*i,55)\n\n self.curTurn.append(QLabel('123',self))\n if self.turn[i]=='w':\n self.curTurn[i].setText('Current\\n Turn:\\n White')\n else:\n self.curTurn[i].setText('Current\\n Turn:\\n Black')\n self.curTurn[i].resize(80,80)\n self.curTurn[i].move(4+500*i,110)\n\n\n self.buttonsPause.append(QPushButton('Pause',self))\n self.buttonsReset.append(QPushButton('Restart',self))\n self.buttonsPhoto.append(QPushButton('ReMake',self))\n\n\n self.buttonsPause[i].resize(150,40)\n self.buttonsPause[i].move(80+500*i,540)\n\n self.buttonsReset[i].resize(150,40)\n self.buttonsReset[i].move(250+500*i,540)\n\n self.buttonsPhoto[i].resize(40,40)\n self.buttonsPhoto[i].move(420+500*i,540)\n\n\n\n\n self.logs.append(QTextEdit(self))\n self.logs[i].setReadOnly(True)\n self.logs[i].resize(400,100)\n self.logs[i].move(55+500*i,420)\n\n\n self.chessboardDisplays.append(QLabel(self))\n self.chessboardDisplays[i].setPixmap(QPixmap(\"images/BoardBase.png\"))\n self.chessboardDisplays[i].resize(400,400)\n self.chessboardDisplays[i].move(55+500*i,0)\n\n self.buttonsChange = QPushButton('',self)\n self.buttonsChange.setIcon(QIcon('images\\settings.png'))\n self.buttonsChange.resize(48,48)\n self.buttonsChange.move(4,192)\n\n self.buttonsChange.clicked.connect(self.ChangeSetting)\n self.buttonsChange.show()\n\n #button function\n if self.playersNum == 1:\n self.buttonsReset[0].clicked.connect(self.RestartOne)\n self.buttonsPhoto[0].clicked.connect(self.CorrectBaseOne)\n self.buttonsPause[0].clicked.connect(self.PauseOne)\n if self.playersNum == 2:\n self.buttonsReset[0].clicked.connect(self.RestartOne)\n self.buttonsReset[1].clicked.connect(self.RestartTwo)\n self.buttonsPhoto[0].clicked.connect(self.CorrectBaseOne)\n self.buttonsPhoto[1].clicked.connect(self.CorrectBaseTwo)\n self.buttonsPause[0].clicked.connect(self.PauseOne)\n self.buttonsPause[1].clicked.connect(self.PauseTwo)\n if self.playersNum == 3:\n self.buttonsReset[0].clicked.connect(self.RestartOne)\n self.buttonsReset[1].clicked.connect(self.RestartTwo)\n self.buttonsReset[2].clicked.connect(self.RestartThree)\n self.buttonsPhoto[0].clicked.connect(self.CorrectBaseOne)\n self.buttonsPhoto[1].clicked.connect(self.CorrectBaseTwo)\n self.buttonsPhoto[2].clicked.connect(self.CorrectBaseThree)\n self.buttonsPause[0].clicked.connect(self.PauseOne)\n self.buttonsPause[1].clicked.connect(self.PauseTwo)\n self.buttonsPause[2].clicked.connect(self.PauseThree)\n\n self.setWindowTitle('ChessGamer')\n self.setGeometry(100, 100, 520*self.playersNum, 590)\n self.setFixedSize(520*self.playersNum, 590)\n\n\n self.show()\n else:\n sendButton = QPushButton(\"Send\",self)\n\n cancelButton = QPushButton('Exit',self)\n cancelButton.clicked.connect(qApp.quit)\n self.textBox = QTextEdit()\n self.textBox.setReadOnly(True)\n\n self.textEdit = QTextEdit()\n\n self.pic = 
QLabel()\n\n\n self.pic.setPixmap(QPixmap(\"images/BoardBase.png\"))\n self.pic.resize(350,350)\n self.pic.show()\n\n self.debugBoard =chess.Board()\n if self.debug:\n print('this is board in init: ',self.debugBoard)\n sendButton.clicked.connect(self.DrawBoard)\n\n vbox = QVBoxLayout()\n hbox = QHBoxLayout()\n hbox.addStretch(1)\n vbox.addStretch(1)\n hbox.addWidget(sendButton)\n hbox.addWidget(cancelButton)\n vbox.addWidget(self.pic)\n vbox.addWidget(self.textBox)\n vbox.addLayout(hbox)\n\n\n\n self.setGeometry(500, 300, 300, 200)\n self.setWindowTitle('ChessGamer')\n self.setLayout(vbox)\n self.show()\n\n\n def ChangeSetting(self):\n\n self.close()\n self.__init__(False, 1)\n self.GameLoop()\n\n def InitDraw(self):\n #gameboard picture\n fontWhite = cv2.imread(\"images/white_square.png\")\n fontBlack = cv2.imread(\"images/brown_square.png\")\n boardPic = np.zeros((400,400,3),dtype=np.uint8)\n tick =0\n\n for x in range(8):\n for y in range(8):\n\n if x%2 == 0:\n if tick %2==0:\n\n boardPic[50*x:50+50*x,50*y:50+50*y] = fontWhite\n\n else:\n\n boardPic[50*x:50+50*x,50*y:50+50*y] = fontBlack\n\n\n else:\n\n if tick %2==0:\n\n boardPic[50*x:50+50*x,50*y:50+50*y] = fontBlack\n\n else:\n\n boardPic[50*x:50+50*x,50*y:50+50*y] = fontWhite\n\n tick += 1\n if self.debug:\n cv2.imshow(\"BoardPic\",boardPic)\n else:\n cv2.imwrite(\"images/BoardBase.png\",boardPic)\n\n def DrawBoard(self,board,player):\n if self.debug:\n print(\"im in drawBoardCall\")\n try:\n board = self.debugBoard\n except AttributeError:\n pass\n\n stringBoard=Board2String(board)\n boardPic = DrawBoardPic(stringBoard, self.debug)\n tmpImagename = \"images/tmp\"+str(player)+\".png\"\n cv2.imwrite(tmpImagename,boardPic)\n\n self.chessboardDisplays[player-1].setPixmap(QPixmap(tmpImagename))\n self.chessboardDisplays[player-1].show()\n\n def DrawGui(self):\n\n if self.playersNum ==1:\n self.chessboardDisplays[0].setPixmap(QPixmap('images/tmp0.png'))\n self.chessboardDisplays[0].show()\n if self.playersNum ==2:\n self.chessboardDisplays[0].setPixmap(QPixmap('images/tmp0.png'))\n self.chessboardDisplays[0].show()\n self.chessboardDisplays[1].setPixmap(QPixmap('images/tmp1.png'))\n self.chessboardDisplays[1].show()\n if self.playersNum ==3:\n self.chessboardDisplays[0].setPixmap(QPixmap('images/tmp0.png'))\n self.chessboardDisplays[0].show()\n self.chessboardDisplays[1].setPixmap(QPixmap('images/tmp1.png'))\n self.chessboardDisplays[1].show()\n self.chessboardDisplays[2].setPixmap(QPixmap('images/tmp2.png'))\n self.chessboardDisplays[2].show()\n\n def Draw(self,board,player):\n\n if player ==0:\n self.garbage = 0\n self.drawThread = DrawThread(board,player)\n self.drawThread.finished.connect(self.DrawGui)\n self.drawThread.setObjectName(\"Draw thread\")\n self.drawThread.start()\n\n if player ==1:\n self.garbage = 1\n self.drawThread1 = DrawThread(board,player)\n self.drawThread1.finished.connect(self.DrawGui)\n self.drawThread1.setObjectName(\"Draw thread\")\n self.drawThread1.start()\n if player ==2:\n self.garbage = 2\n self.drawThread2 = DrawThread(board,player)\n self.drawThread2.finished.connect(self.DrawGui)\n self.drawThread2.setObjectName(\"Draw thread\")\n self.drawThread2.start()\n\n def GainDataFromThread(self,strData):\n self.gainedData=strData\n #print('this is data in main loop',strData)\n\n def MoveDone(self,player):\n self.RobotMoveDone[player]=True\n self.GenerateMoveDone[player] = True\n\n\n if self.playersNum>1:\n\n\n if self.curPlayer[0]==0 and self.playersNum==2:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 
and self.playersNum==2:\n self.curPlayer[0]=0\n\n\n if self.playersNum==3:\n\n if self.curPlayer[0]==0 and self.curPlayer[1]==1:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.curPlayer[1]==1:\n self.curPlayer[0]=2\n self.curPlayer[1]=0\n elif self.curPlayer[0]==2 and self.curPlayer[1]==0:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.curPlayer[1]==0:\n self.curPlayer[0]=0\n self.curPlayer[1]=1\n\n def RestartDone(self,player):\n #print('Restart Done')\n self.playerStatus[player][0] = chess.Board()\n #print('2',self.playerStatus[player][0].fen())\n self.RobotMoveDone[player] = True\n self.GenerateMoveDone[player] = True\n self.restartStatus[player]=False\n self.playerStatus[player][4] = False\n\n def PlayerCheckDone(self,data):\n\n player = int(data[1])\n if data[0]=='0':\n '''\n if first data symbol 0 then no turn done\n '''\n if self.curPlayer[0]==0:\n\n self.robot.set_digital_out(dataStructers.OutButtonOne,True)\n elif self.curPlayer[0]==1:\n self.robot.set_digital_out(dataStructers.OutButtonTwo,True)\n elif self.curPlayer[0]==2:\n self.robot.set_digital_out(dataStructers.OutButtonThree,True)\n\n if self.playersNum>1:\n\n\n if self.curPlayer[0]==0 and self.playersNum==2:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.playersNum==2:\n self.curPlayer[0]=0\n\n\n if self.playersNum==3:\n\n if self.curPlayer[0]==0 and self.curPlayer[1]==1:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.curPlayer[1]==1:\n self.curPlayer[0]=2\n self.curPlayer[1]=0\n elif self.curPlayer[0]==2 and self.curPlayer[1]==0:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.curPlayer[1]==0:\n self.curPlayer[0]=0\n self.curPlayer[1]=1\n\n self.GenerateMoveDone[player] = True\n\n elif data[0]=='1':\n '''\n Player has made a move, need to push board\n \n '''\n self.robot.set_digital_out(dataStructers.led_red,False)\n self.robot.set_digital_out(dataStructers.led_blue,False)\n filename = 'player'+str(player)+'.txt'\n file = open(filename,'w')\n a = data[2:data.find('-')]\n b = data[data.find('-')+1:len(data)]\n uci = square2Uci(int(a)) +square2Uci(int(b))\n move= chess.Move.from_uci(uci)\n self.playerStatus[player][0].push(move)\n file.write(self.playerStatus[player][0].fen())\n file.close()\n self.GenerateMoveDone[player] = True\n\n\n\n elif data[0]=='2':\n '''\n something gone wrong and need to be corrected\n '''\n print('Something went wrong')\n if self.curPlayer[0]==0:\n\n self.robot.set_digital_out(dataStructers.OutButtonOne,True)\n elif self.curPlayer[0]==1:\n self.robot.set_digital_out(dataStructers.OutButtonTwo,True)\n elif self.curPlayer[0]==2:\n self.robot.set_digital_out(dataStructers.OutButtonThree,True)\n if self.playersNum>1:\n\n\n if self.curPlayer[0]==0 and self.playersNum==2:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.playersNum==2:\n self.curPlayer[0]=0\n\n\n if self.playersNum==3:\n\n if self.curPlayer[0]==0 and self.curPlayer[1]==1:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.curPlayer[1]==1:\n self.curPlayer[0]=2\n self.curPlayer[1]=0\n elif self.curPlayer[0]==2 and self.curPlayer[1]==0:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.curPlayer[1]==0:\n self.curPlayer[0]=0\n self.curPlayer[1]=1\n\n self.GenerateMoveDone[player] = True\n\n def LoopThreadDone(self):\n\n currentPlayer = self.curPlayer[0]\n self.robot.set_digital_out(dataStructers.led_red,False)\n self.robot.set_digital_out(dataStructers.led_blue,False)\n #self.CheckPermission[currentPlayer]=False\n\n\n\n\n if 
self.RobotMoveDone[currentPlayer]:\n\n prevBoard=self.playerStatus[currentPlayer][0].copy()\n a = int(self.gainedData[0:self.gainedData.find(\"-\")])\n\n if self.gainedData.count(\"+\")==1:\n #c = self.gainedData[-1]\n c = FindPosiblePromotion(self.playerStatus[self.curPlayer[0]][0],self.gainedData[-1])\n b = int(self.gainedData[self.gainedData.find(\"-\")+1:self.gainedData.find(\"+\")])\n uci = square2Uci(a)+square2Uci(b)+c\n else:\n c = ''\n b = int(self.gainedData[self.gainedData.find(\"-\")+1:len(self.gainedData)])\n uci = square2Uci(a)+square2Uci(b)\n move=chess.Move.from_uci(uci)\n\n\n\n if prevBoard.is_castling(move):\n castling = 1\n else:\n castling = 0\n\n filename = 'player'+str(currentPlayer)+'.txt'\n file = open(filename,'w')\n self.playerStatus[self.curPlayer[0]][0].push(move)\n file.write(self.playerStatus[self.curPlayer[0]][0].fen())\n file.close()\n boardWithMove = self.playerStatus[self.curPlayer[0]][0]\n #\n #print(boardWithMove.fen())\n #\n if self.playerStatus[currentPlayer][1]:\n if self.playerStatus[currentPlayer][0].is_check():\n\n self.robot.set_digital_out(dataStructers.led_red,True)\n self.robot.set_digital_out(dataStructers.led_blue,True)\n\n self.RobotMoveDone[currentPlayer]=False\n if self.playerStatus[currentPlayer][1]==True:\n robotMoveThread=RoboWorker(currentPlayer,[a,b,c],[prevBoard,boardWithMove],self.robot,castling,self.camera)\n elif self.playerStatus[currentPlayer][1]==False:\n castling+=3\n robotMoveThread=RoboWorker(currentPlayer,[a,b,c],[prevBoard,boardWithMove],self.robot,castling,self.camera)\n robotMoveThread.doneSignal.connect(self.MoveDone)\n robotMoveThread.start()\n robotMoveThread.setObjectName(\"Rpbpt move thread\")\n\n\n\n\n if BoardTurn(prevBoard) =='w':\n message = 'Player move: '+uci\n self.logs[self.curPlayer[0]].setAlignment(QtCore.Qt.AlignLeft)\n self.logs[self.curPlayer[0]].append(message)\n else:\n message = 'Robot move: '+uci\n self.logs[self.curPlayer[0]].setAlignment(QtCore.Qt.AlignRight)\n self.logs[self.curPlayer[0]].append(message)\n\n self.Draw(self.playerStatus[currentPlayer][0],currentPlayer)\n\n #self.GenerateMoveDone[player]= True\n\n def Loop(self):\n\n\n currentPlayer = self.curPlayer[0]\n #self.Draw(self.playerStatus[currentPlayer][0],currentPlayer)\n #print('Run gameloop')\n #print('Current turn',BoardTurn(self.playerStatus[self.curPlayer[0]][0]))\n\n if self.GenerateMoveDone[currentPlayer]:\n '''\n If move Generation from player or Ai is finished, than game may be continued\n '''\n if self.playerStatus[currentPlayer][0].is_game_over() == True or self.playerStatus[currentPlayer][4] == True:\n #print('Game is finished')\n '''\n If game is finished, then it must be restarted\n '''\n if self.playerStatus[currentPlayer][5]!=True:\n\n self.logs[currentPlayer].append(\"Game over\")\n\n\n print('Restart Started')\n self.GenerateMoveDone[currentPlayer] = False\n\n\n endBoard = self.playerStatus[currentPlayer][0].copy()\n if self.playerStatus[self.curPlayer[0]][1]:\n restartThread = RoboWorker(currentPlayer,[0,0,''],[endBoard,0],self.robot,6,self.camera)\n else:\n restartThread = RoboWorker(currentPlayer,[0,0,''],[endBoard,0],self.robot,2,self.camera)\n restartThread.restarSignal[int].connect(self.RestartDone)\n #restartThread.restarSignal[int].connect(self.LoopThreadDone)\n restartThread.start()\n\n\n elif self.playerStatus[currentPlayer][0].is_game_over() ==False and self.playerStatus[currentPlayer][5]==False:\n '''\n If a game is not finished and not paused, then it needs to Get move from ai or from player\n '''\n\n if 
self.restartStatus[currentPlayer]==False:\n '''\n some help to restart game board CHECK IT!!!!!!!!!!!!\n '''\n self.playerStatus[currentPlayer][0]=chess.Board()\n\n\n self.restartStatus[currentPlayer]=True\n\n #print('1',self.playerStatus[currentPlayer][0].fen())\n\n if BoardTurn(self.playerStatus[self.curPlayer[0]][0]) =='w':\n #print(\"White Move\")\n self.curTurn[currentPlayer].setText('Current\\n Turn:\\n White')\n\n if self.playerStatus[self.curPlayer[0]][1]:\n\n '''\n main human iteraction event\n '''\n #print('player Move')\n while True:\n file = open('status.txt','r')\n self.CheckPermission = file.read()\n file.close()\n if len(self.CheckPermission)==3:\n break\n if self.CheckPermission[self.curPlayer[0]]=='1':\n\n if self.curPlayer[0] ==0:\n data = '0'+self.CheckPermission[1]+self.CheckPermission[2]\n elif self.curPlayer[0] ==1:\n data = self.CheckPermission[0]+'0'+self.CheckPermission[2]\n elif self.curPlayer[0] ==2:\n data = self.CheckPermission[0]+self.CheckPermission[1] +'0'\n file = open('status.txt','w')\n file.write(data)\n file.close()\n self.robot.set_digital_out(2+currentPlayer,False)\n self.GenerateMoveDone[currentPlayer] = False\n #print('1111111111111111111111111111')\n checkPlayer = CheckBoard(self.curPlayer[0],self.playerStatus[currentPlayer][0],self.robot,self.camera,2)\n checkPlayer.result[str].connect(self.PlayerCheckDone)\n checkPlayer.start()\n\n #print('222222222222222')\n\n #self.CheckPermission[self.curPlayer[0]] = False\n else:\n if self.playersNum>1:\n\n\n if self.curPlayer[0]==0 and self.playersNum==2:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.playersNum==2:\n self.curPlayer[0]=0\n\n\n if self.playersNum==3:\n\n if self.curPlayer[0]==0 and self.curPlayer[1]==1:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.curPlayer[1]==1:\n self.curPlayer[0]=2\n self.curPlayer[1]=0\n elif self.curPlayer[0]==2 and self.curPlayer[1]==0:\n self.curPlayer[0]=1\n elif self.curPlayer[0]==1 and self.curPlayer[1]==0:\n self.curPlayer[0]=0\n self.curPlayer[1]=1\n\n #wait for videosource\n else:\n\n self.GenerateMoveDone[currentPlayer] = False\n\n print('Player is Ai making movement')\n\n gameThread = GameLoop(self.playerStatus[currentPlayer][0],self.playerStatus[currentPlayer][2],self.engine,currentPlayer)\n gameThread.dataSignal[str].connect(self.GainDataFromThread)\n gameThread.doneSignal[int].connect(self.LoopThreadDone)\n self.robot.set_digital_out(dataStructers.led_blue,True)\n if gameThread.isFinished:\n gameThread.setObjectName('Game thread')\n gameThread.start()\n\n if BoardTurn(self.playerStatus[self.curPlayer[0]][0]) =='b':\n #print('Black Move')\n self.curTurn[currentPlayer].setText('Current\\n Turn:\\n Black')\n\n self.GenerateMoveDone[currentPlayer] = False\n self.robot.set_digital_out(2+currentPlayer,False)\n #print(' Robot Move')\n gameThread1 = GameLoop(self.playerStatus[currentPlayer][0],self.playerStatus[currentPlayer][3],self.engine,currentPlayer)\n gameThread1.dataSignal[str].connect(self.GainDataFromThread)\n gameThread1.doneSignal[int].connect(self.LoopThreadDone)\n self.robot.set_digital_out(dataStructers.led_blue,True)\n if gameThread1.isFinished:\n gameThread1.setObjectName(\"game Thread 2\")\n gameThread1.start()\n self.robot.set_digital_out(2+currentPlayer,False)\n\n def CorrectBaseOne(self):\n\n restartThread = CheckBoard(0,'',self.robot,self.camera,3)\n self.robot.set_digital_out(dataStructers.OutButtonOne, True)\n restartThread.start()\n def CorrectBaseTwo(self):\n\n restartThread = 
CheckBoard(1,'',self.robot,self.camera,3)\n        self.robot.set_digital_out(dataStructers.OutButtonTwo, True)\n        restartThread.start()\n    def CorrectBaseThree(self):\n\n        restartThread = CheckBoard(2,'',self.robot,self.camera,3)\n        self.robot.set_digital_out(dataStructers.OutButtonThree, True)\n        restartThread.start()\n\n\n    def StopGame(self,number):\n        print(number)\n\n    def initPhotoDone(self,data):\n        print(data)\n        print('Init Photo done')\n        self.GenerateMoveDone[self.curInt] = True\n        self.robot.set_digital_out(1,False)\n        if self.curInt+1 ==self.playersNum:\n            self.initDone =True\n        if self.playerStatus[self.curInt-1][1]:\n            self.robot.set_digital_out(2+self.curInt-1,True)\n        else:\n            self.robot.set_digital_out(2+self.curInt-1,False)\n        self.curInt+=1\n\n    def RestartOne(self):\n        #self.playerStatus[0][0] = chess.Board() \n        self.playerStatus[0][4] = True\n\n        self.isdone=False\n        # t =\n        self.logs[0].append(\"Restart\\n\")\n        self.Speaker(\"Restarting table one\")\n\n    def RestartTwo(self):\n        #self.playerStatus[1][0] = chess.Board() \n        self.playerStatus[1][4] = True\n\n        self.logs[1].append(\"Restart\\n\")\n        self.Speaker(\"Restarting table two\")\n\n    def RestartThree(self):\n        #self.playerStatus[2][0] = chess.Board() \n        self.playerStatus[2][4] = True\n\n        self.timer.start(0)\n        self.logs[2].append(\"Restart\\n\")\n        self.Speaker(\"Restarting table three\")\n\n    def PauseOne(self):\n        if self.playerStatus[0][5]==False:\n            self.playerStatus[0][5]=True\n            self.buttonsPause[0].setText('Unpause')\n            self.logs[0].append(\"Pause\\n\")\n            self.Speaker(\"Player one is paused\")\n        elif self.playerStatus[0][5]==True:\n            self.playerStatus[0][5]=False\n            self.buttonsPause[0].setText('Pause')\n            self.logs[0].append(\"Pause\\n\")\n            self.Speaker(\"Player one, the game continues\")\n\n    def PauseTwo(self):\n        if self.playerStatus[1][5]==False:\n            self.playerStatus[1][5]=True\n            self.buttonsPause[1].setText('Unpause')\n            self.logs[1].append(\"Pause\\n\")\n            self.Speaker(\"Player two is paused\")\n        elif self.playerStatus[1][5]==True:\n            self.playerStatus[1][5]=False\n            self.buttonsPause[1].setText('Pause')\n            self.logs[1].append(\"Pause\\n\")\n            self.Speaker(\"Player two, the game continues\")\n\n    def PauseThree(self):\n        if self.playerStatus[2][5]==False:\n            self.playerStatus[2][5]=True\n            self.buttonsPause[2].setText('Unpause')\n            self.logs[2].append(\"Pause\\n\")\n            self.Speaker(\"Player three is paused\")\n        elif self.playerStatus[2][5]==True:\n            self.playerStatus[2][5]=False\n            self.buttonsPause[2].setText('Pause')\n            self.logs[2].append(\"Pause\\n\")\n            self.Speaker(\"Player three, the game continues\")\n\n    def Speaker(self, text):\n        speaker = win32com.client.Dispatch(\"SAPI.SpVoice\")\n        speaker.Speak(text)\n\n    def InitPhoto(self):\n\n        if self.initDone or self.playerStatus[self.curInt][1]==0:\n            self.Loop()\n            #print('init done') \n        elif self.initDone ==False and self.GenerateMoveDone[self.curInt]:\n\n            makeFirstPhotoEvent = CheckBoard(self.curInt,self.playerStatus[self.curInt][0],self.robot,self.camera,1)\n            makeFirstPhotoEvent.result[str].connect(self.initPhotoDone)\n            makeFirstPhotoEvent.setObjectName('Init thread')\n            makeFirstPhotoEvent.start()\n\n            self.GenerateMoveDone[self.curInt] = False\n\n    def ButtonPushed(self,player):\n        pass\n        #self.CheckPermission[player]=True\n    def checkButtons(self):\n        pass\n        # print ('lets check button in game.py')\n        # if self.robot.get_digital_in(dataStructers.InButtonOne) == True:\n        #     print ('Button for player one')\n        #     self.CheckPermission[0]=True\n        # elif 
self.robot.get_digital_in(dataStructers.InButtonTwo) == True:\n        #     print ('button for player two')\n        #     self.CheckPermission[1]=True\n        # elif self.robot.get_digital_in(dataStructers.InButtonThree) == True:\n        #     print ('button for player three')\n        #     self.CheckPermission[2]=True\n    #def EmitSignal(self,event,signal, value):\n    #    buttonCheckEvent = event\n    #    if value == int:\n    #        buttonCheckEvent.doneSignal[int].connect(signal)\n    #    elif value == str:\n    #        buttonCheckEvent.doneSignal[str].connect(signal)\n    #    buttonCheckEvent.start()\n\n    def GameLoop(self):\n\n        self.playerStatus = []\n        self.engine = ChessEngine(False)\n        boards = []\n        #ButtonCheckEvent = ButtonCheck(self.robot)\n        #ButtonCheckEvent.doneSignal[int].connect(self.ButtonPushed)\n        #ButtonCheckEvent.start()\n        #EmitSignal(ButtonCheck(self.robot),ButtonPushed, int)\n\n\n\n        for i in range(3):\n\n            '''\n            playerStatus[i][j]\n            i = player index from 0 to 2\n            j = 0 - current board\n            j = 1 player type: AI or a human (True or 1 for human)\n            j = 2 difficulty for robot AI\n            j = 3 difficulty for player AI\n            j = 4 gameOver status (True or False)\n            j = 5 pause\n            \n            '''\n            try:\n                filename = 'player'+str(i)+'.txt'\n                file = open(filename,'r')\n                fen = file.read()\n            except FileNotFoundError:\n                fen='rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'\n            self.playerStatus.append([chess.Board(fen),self.players[i],self.difficulties[i],self.aiDifficulties[i],False,False])\n            boards.append(chess.Board())\n            self.Draw(self.playerStatus[i][0],i)\n\n        self.curInt = 0\n        self.timer = QtCore.QTimer()\n        self.timer.timeout.connect(self.InitPhoto)\n        self.timer.setInterval(300)\n        self.timer.start(1)\n\n        self.timer2 = QtCore.QTimer()\n        self.timer2.timeout.connect(self.checkButtons)\n        self.timer2.setInterval(500)\n        self.timer2.start(1)\n\n\n\n\n\n        #print('playersStats',self.playerStatus)\n# \n#        self.timer = QtCore.QTimer()\n#        self.timer.timeout.connect(self.Loop)\n#        self.timer.setInterval(300)\n#        self.timer.start(1) \n# \n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n\n    game = MainGame(False,3)\n    \n    game.GameLoop()\n    \n    sys.exit(app.exec_())\n\n    \n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":33148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
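# --- Added example (not part of the dataset records) ----------------------------
# The GameLoop docstring in the last record documents playerStatus by positional
# index. A hedged sketch of how named constants would make that layout
# self-describing; the sample row below is invented.
BOARD, IS_HUMAN, ROBOT_AI, PLAYER_AI, GAME_OVER, PAUSED = range(6)

player_status = [None, True, 3, 2, False, False]   # hypothetical row
print("human player:", player_status[IS_HUMAN], "paused:", player_status[PAUSED])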